From 69255e746890274e887ba36a403019380cde0b48 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 20 Dec 2021 19:56:41 +0000 Subject: firmware: arm_scmi: Add support for atomic transports An SCMI transport can be configured as .atomic_enabled in order to signal to the SCMI core that its entire TX path is executed in atomic context and that, when requested, polling mode should be used while waiting for command responses. When a specific platform configuration has properly configured such a transport as .atomic_enabled, the SCMI core will also take care not to sleep in the corresponding RX path while waiting for a response if that specific command transaction was requested as atomic using polling mode. Asynchronous commands should not be used in an atomic context, so a warning is emitted if polling was requested for an asynchronous command. Also add a method to check, from the SCMI drivers, whether the underlying SCMI transport is currently configured to support atomic transactions: this will be used by upper layers to determine if atomic requests can be supported at all on this SCMI instance. Link: https://lore.kernel.org/r/20211220195646.44498-7-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- include/linux/scmi_protocol.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include/linux') diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 80e781c51ddc..9f895cb81818 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -612,6 +612,13 @@ struct scmi_notify_ops { * @devm_protocol_get: devres managed method to acquire a protocol and get specific * operations and a dedicated protocol handler * @devm_protocol_put: devres managed method to release a protocol + * @is_transport_atomic: method to check if the underlying transport for this + * instance handle is configured to support atomic + * transactions for commands. + * Some users of the SCMI stack in the upper layers could + * be interested to know if they can assume SCMI + * command transactions associated to this handle will + * never sleep and act accordingly. * @notify_ops: pointer to set of notifications related operations */ struct scmi_handle { @@ -622,6 +629,7 @@ struct scmi_handle { (*devm_protocol_get)(struct scmi_device *sdev, u8 proto, struct scmi_protocol_handle **ph); void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto); + bool (*is_transport_atomic)(const struct scmi_handle *handle); const struct scmi_notify_ops *notify_ops; }; -- cgit v1.2.3-71-gd317 From b5e7b59c3480f355910f9d2c6ece5857922a5e54 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 28 Sep 2021 09:47:57 +1000 Subject: NFS: change nfs_access_get_cached to only report the mask Currently the nfs_access_get_cached family of functions report a 'struct nfs_access_entry' as the result, with both .mask and .cred set. However the .cred is never used. This is just as well, as there is no guarantee that it won't be freed before use. Change these functions to only report the 'mask', as this is all that is used or needed. 
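As a sketch of the resulting calling convention (mirroring the xattr-handler conversions in the diff below; function context and error handling elided), only the mask comes back to the caller:

	u32 mask;

	/* No nfs_access_entry, and hence no potentially-freed ->cred,
	 * is exposed to the caller any more.
	 */
	if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
		if (!(mask & NFS_ACCESS_XAREAD))
			return -EACCES;
	}
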
Signed-off-by: NeilBrown Signed-off-by: Anna Schumaker --- fs/nfs/dir.c | 20 +++++++++----------- fs/nfs/nfs4proc.c | 18 +++++++++--------- include/linux/nfs_fs.h | 4 ++-- 3 files changed, 20 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 731d31015b6a..8487a6d69116 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2673,7 +2673,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co return NULL; } -static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block) +static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_access_entry *cache; @@ -2703,8 +2703,7 @@ static int nfs_access_get_cached_locked(struct inode *inode, const struct cred * spin_lock(&inode->i_lock); retry = false; } - res->cred = cache->cred; - res->mask = cache->mask; + *mask = cache->mask; list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru); err = 0; out: @@ -2716,7 +2715,7 @@ out_zap: return -ENOENT; } -static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res) +static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, u32 *mask) { /* Only check the most recently returned cache entry, * but do it without locking. @@ -2738,22 +2737,21 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre goto out; if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) goto out; - res->cred = cache->cred; - res->mask = cache->mask; + *mask = cache->mask; err = 0; out: rcu_read_unlock(); return err; } -int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct -nfs_access_entry *res, bool may_block) +int nfs_access_get_cached(struct inode *inode, const struct cred *cred, + u32 *mask, bool may_block) { int status; - status = nfs_access_get_cached_rcu(inode, cred, res); + status = nfs_access_get_cached_rcu(inode, cred, mask); if (status != 0) - status = nfs_access_get_cached_locked(inode, cred, res, + status = nfs_access_get_cached_locked(inode, cred, mask, may_block); return status; @@ -2874,7 +2872,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) trace_nfs_access_enter(inode); - status = nfs_access_get_cached(inode, cred, &cache, may_block); + status = nfs_access_get_cached(inode, cred, &cache.mask, may_block); if (status == 0) goto out_cached; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ee3bc79f6ca3..322ff45ad15c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7611,7 +7611,7 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, const char *key, const void *buf, size_t buflen, int flags) { - struct nfs_access_entry cache; + u32 mask; int ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) @@ -7626,8 +7626,8 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, * do a cached access check for the XA* flags to possibly avoid * doing an RPC and getting EACCES back. 
*/ - if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { - if (!(cache.mask & NFS_ACCESS_XAWRITE)) + if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { + if (!(mask & NFS_ACCESS_XAWRITE)) return -EACCES; } @@ -7648,14 +7648,14 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *key, void *buf, size_t buflen) { - struct nfs_access_entry cache; + u32 mask; ssize_t ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return -EOPNOTSUPP; - if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { - if (!(cache.mask & NFS_ACCESS_XAREAD)) + if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { + if (!(mask & NFS_ACCESS_XAREAD)) return -EACCES; } @@ -7680,13 +7680,13 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) ssize_t ret, size; char *buf; size_t buflen; - struct nfs_access_entry cache; + u32 mask; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return 0; - if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { - if (!(cache.mask & NFS_ACCESS_XALIST)) + if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { + if (!(mask & NFS_ACCESS_XALIST)) return 0; } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 05f249f20f55..f33559acbcc2 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -533,8 +533,8 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); -extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, - bool may_block); +extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, + u32 *mask, bool may_block); /* * linux/fs/nfs/symlink.c -- cgit v1.2.3-71-gd317 From 73fbb3fa647bdb5b60469af8101c741ece03a825 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 28 Sep 2021 09:47:57 +1000 Subject: NFS: pass cred explicitly for access tests Storing the 'struct cred *' in nfs_access_entry is problematic. An active 'cred' can keep a 'struct key *' active, and a quota is imposed on the number of such keys that a user can maintain. Cached 'nfs_access_entry' structs have indefinite lifetime, and having these keep 'struct key's alive imposes on that quota. So a future patch will remove the ->cred ref from nfs_access_entry. To prepare, change various functions to not assume there is a 'cred' in the nfs_access_entry, but to pass the cred around explicitly. 
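A minimal sketch of the new convention, condensed from the nfs_do_access() conversion in the diff below; the cred now travels as an explicit argument instead of being read out of the entry:

	struct nfs_access_entry cache;

	cache.mask = NFS_ACCESS_READ | NFS_ACCESS_LOOKUP;
	/* Callees that need the cred receive it explicitly ... */
	status = NFS_PROTO(inode)->access(inode, &cache, cred);
	/* ... and so does the cache insertion. */
	if (status == 0)
		nfs_access_add_cache(inode, &cache, cred);
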
Signed-off-by: NeilBrown Signed-off-by: Anna Schumaker --- fs/nfs/dir.c | 17 ++++++++++------- fs/nfs/nfs3proc.c | 5 +++-- fs/nfs/nfs4proc.c | 12 +++++++----- include/linux/nfs_fs.h | 2 +- include/linux/nfs_xdr.h | 2 +- 5 files changed, 22 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8487a6d69116..a34351ce79a2 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2758,7 +2758,9 @@ int nfs_access_get_cached(struct inode *inode, const struct cred *cred, } EXPORT_SYMBOL_GPL(nfs_access_get_cached); -static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set) +static void nfs_access_add_rbtree(struct inode *inode, + struct nfs_access_entry *set, + const struct cred *cred) { struct nfs_inode *nfsi = NFS_I(inode); struct rb_root *root_node = &nfsi->access_cache; @@ -2771,7 +2773,7 @@ static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry * while (*p != NULL) { parent = *p; entry = rb_entry(parent, struct nfs_access_entry, rb_node); - cmp = cred_fscmp(set->cred, entry->cred); + cmp = cred_fscmp(cred, entry->cred); if (cmp < 0) p = &parent->rb_left; @@ -2793,13 +2795,14 @@ found: nfs_access_free_entry(entry); } -void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set) +void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set, + const struct cred *cred) { struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL); if (cache == NULL) return; RB_CLEAR_NODE(&cache->rb_node); - cache->cred = get_cred(set->cred); + cache->cred = get_cred(cred); cache->mask = set->mask; /* The above field assignments must be visible @@ -2807,7 +2810,7 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set) * use rcu_assign_pointer, so just force the memory barrier. 
*/ smp_wmb(); - nfs_access_add_rbtree(inode, cache); + nfs_access_add_rbtree(inode, cache, cred); /* Update accounting */ smp_mb__before_atomic(); @@ -2893,7 +2896,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) else cache.mask |= NFS_ACCESS_EXECUTE; cache.cred = cred; - status = NFS_PROTO(inode)->access(inode, &cache); + status = NFS_PROTO(inode)->access(inode, &cache, cred); if (status != 0) { if (status == -ESTALE) { if (!S_ISDIR(inode->i_mode)) @@ -2903,7 +2906,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) } goto out; } - nfs_access_add_cache(inode, &cache); + nfs_access_add_cache(inode, &cache, cred); out_cached: cache_mask = nfs_access_calc_mask(cache.mask, inode->i_mode); if ((mask & ~cache_mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) != 0) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 7100514d306b..1597eef40d54 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -220,7 +220,8 @@ static int nfs3_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, task_flags); } -static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry) +static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry, + const struct cred *cred) { struct nfs3_accessargs arg = { .fh = NFS_FH(inode), @@ -231,7 +232,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry) .rpc_proc = &nfs3_procedures[NFS3PROC_ACCESS], .rpc_argp = &arg, .rpc_resp = &res, - .rpc_cred = entry->cred, + .rpc_cred = cred, }; int status = -ENOMEM; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 322ff45ad15c..f02aa9877e6f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2655,7 +2655,7 @@ static int nfs4_opendata_access(const struct cred *cred, cache.cred = cred; nfs_access_set_mask(&cache, opendata->o_res.access_result); - nfs_access_add_cache(state->inode, &cache); + nfs_access_add_cache(state->inode, &cache, cred); flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; if ((mask & ~cache.mask & flags) == 0) @@ -4441,7 +4441,8 @@ static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, return err; } -static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) +static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, + const struct cred *cred) { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_accessargs args = { @@ -4455,7 +4456,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], .rpc_argp = &args, .rpc_resp = &res, - .rpc_cred = entry->cred, + .rpc_cred = cred, }; int status = 0; @@ -4475,14 +4476,15 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry return status; } -static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) +static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, + const struct cred *cred) { struct nfs4_exception exception = { .interruptible = true, }; int err; do { - err = _nfs4_proc_access(inode, entry); + err = _nfs4_proc_access(inode, entry, cred); trace_nfs4_access(inode, err); err = nfs4_handle_exception(NFS_SERVER(inode), err, &exception); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f33559acbcc2..c33b1b792bc9 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -396,7 +396,7 @@ extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct 
nfs_fa extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(struct user_namespace *, const struct path *, struct kstat *, u32, unsigned int); -extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); +extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *, const struct cred *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct user_namespace *, struct inode *, int); extern int nfs_open(struct inode *, struct file *); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 967a0098f0a9..9102bdaa3faa 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1737,7 +1737,7 @@ struct nfs_rpc_ops { struct nfs_fh *, struct nfs_fattr *); int (*lookupp) (struct inode *, struct nfs_fh *, struct nfs_fattr *); - int (*access) (struct inode *, struct nfs_access_entry *); + int (*access) (struct inode *, struct nfs_access_entry *, const struct cred *); int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int); int (*create) (struct inode *, struct dentry *, -- cgit v1.2.3-71-gd317 From 6238aec83f3fb12132f964937e5bbcf248fea8f9 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 28 Sep 2021 09:47:57 +1000 Subject: NFS: don't store 'struct cred *' in struct nfs_access_entry Storing the 'struct cred *' in nfs_access_entry is problematic. An active 'cred' can keep a 'struct key *' active, and a quota is imposed on the number of such keys that a user can maintain. Cached 'nfs_access_entry' structs have indefinite lifetime, and having these keep 'struct key's alive imposes on that quota. So remove the 'struct cred *' and replace it with the fields we need: kuid_t, kgid_t, and struct group_info * This makes the 'struct nfs_access_entry' 64 bits larger. 
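As a rough size check (assuming 64-bit pointers and 32-bit kuid_t/kgid_t): the single 8-byte 'struct cred *' is replaced by 4 + 4 + 8 = 16 bytes of fsuid, fsgid and group_info pointer, i.e. 8 bytes (64 bits) of growth, which is where the figure above comes from.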
New function "access_cmp" is introduced which is identical to cred_fscmp() except that the second arg is an 'nfs_access_entry', rather than a 'cred' Fixes: b68572e07c58 ("NFS: change access cache to use 'struct cred'.") Signed-off-by: NeilBrown Signed-off-by: Anna Schumaker --- fs/nfs/dir.c | 50 ++++++++++++++++++++++++++++++++++++++++++++------ fs/nfs/nfs4proc.c | 1 - include/linux/nfs_fs.h | 4 +++- 3 files changed, 47 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a34351ce79a2..7dee3fd10382 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2528,7 +2528,7 @@ MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache lengt static void nfs_access_free_entry(struct nfs_access_entry *entry) { - put_cred(entry->cred); + put_group_info(entry->group_info); kfree_rcu(entry, rcu_head); smp_mb__before_atomic(); atomic_long_dec(&nfs_access_nr_entries); @@ -2654,6 +2654,43 @@ void nfs_access_zap_cache(struct inode *inode) } EXPORT_SYMBOL_GPL(nfs_access_zap_cache); +static int access_cmp(const struct cred *a, const struct nfs_access_entry *b) +{ + struct group_info *ga, *gb; + int g; + + if (uid_lt(a->fsuid, b->fsuid)) + return -1; + if (uid_gt(a->fsuid, b->fsuid)) + return 1; + + if (gid_lt(a->fsgid, b->fsgid)) + return -1; + if (gid_gt(a->fsgid, b->fsgid)) + return 1; + + ga = a->group_info; + gb = b->group_info; + if (ga == gb) + return 0; + if (ga == NULL) + return -1; + if (gb == NULL) + return 1; + if (ga->ngroups < gb->ngroups) + return -1; + if (ga->ngroups > gb->ngroups) + return 1; + + for (g = 0; g < ga->ngroups; g++) { + if (gid_lt(ga->gid[g], gb->gid[g])) + return -1; + if (gid_gt(ga->gid[g], gb->gid[g])) + return 1; + } + return 0; +} + static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, const struct cred *cred) { struct rb_node *n = NFS_I(inode)->access_cache.rb_node; @@ -2661,7 +2698,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co while (n != NULL) { struct nfs_access_entry *entry = rb_entry(n, struct nfs_access_entry, rb_node); - int cmp = cred_fscmp(cred, entry->cred); + int cmp = access_cmp(cred, entry); if (cmp < 0) n = n->rb_left; @@ -2731,7 +2768,7 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre lh = rcu_dereference(list_tail_rcu(&nfsi->access_cache_entry_lru)); cache = list_entry(lh, struct nfs_access_entry, lru); if (lh == &nfsi->access_cache_entry_lru || - cred_fscmp(cred, cache->cred) != 0) + access_cmp(cred, cache) != 0) cache = NULL; if (cache == NULL) goto out; @@ -2773,7 +2810,7 @@ static void nfs_access_add_rbtree(struct inode *inode, while (*p != NULL) { parent = *p; entry = rb_entry(parent, struct nfs_access_entry, rb_node); - cmp = cred_fscmp(cred, entry->cred); + cmp = access_cmp(cred, entry); if (cmp < 0) p = &parent->rb_left; @@ -2802,7 +2839,9 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set, if (cache == NULL) return; RB_CLEAR_NODE(&cache->rb_node); - cache->cred = get_cred(cred); + cache->fsuid = cred->fsuid; + cache->fsgid = cred->fsgid; + cache->group_info = get_group_info(cred->group_info); cache->mask = set->mask; /* The above field assignments must be visible @@ -2895,7 +2934,6 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP; else cache.mask |= NFS_ACCESS_EXECUTE; - cache.cred = cred; status = NFS_PROTO(inode)->access(inode, &cache, cred); if (status != 0) { if (status 
== -ESTALE) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f02aa9877e6f..5d9ff01ddf46 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2653,7 +2653,6 @@ static int nfs4_opendata_access(const struct cred *cred, } else if ((fmode & FMODE_READ) && !opendata->file_created) mask = NFS4_ACCESS_READ; - cache.cred = cred; nfs_access_set_mask(&cache, opendata->o_res.access_result); nfs_access_add_cache(state->inode, &cache, cred); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c33b1b792bc9..450e393d4bf2 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -61,7 +61,9 @@ struct nfs_access_entry { struct rb_node rb_node; struct list_head lru; - const struct cred * cred; + kuid_t fsuid; + kgid_t fsgid; + struct group_info *group_info; __u32 mask; struct rcu_head rcu_head; }; -- cgit v1.2.3-71-gd317 From 1ab5be4ac5b1c9ce39ce1037c45b68d2ce6eede0 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 17 Dec 2021 15:36:54 -0500 Subject: NFSv4: Add some support for case insensitive filesystems Add capabilities to allow the NFS client to recognise when it is dealing with case insensitive and case preserving filesystems. Signed-off-by: Trond Myklebust Signed-off-by: Anna Schumaker --- fs/nfs/nfs4proc.c | 8 +++++++- fs/nfs/nfs4xdr.c | 40 ++++++++++++++++++++++++++++++++++++++++ include/linux/nfs_fs_sb.h | 2 ++ include/linux/nfs_xdr.h | 2 ++ 4 files changed, 51 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 5d9ff01ddf46..a6d7472058ad 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3840,7 +3840,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f FATTR4_WORD0_FH_EXPIRE_TYPE | FATTR4_WORD0_LINK_SUPPORT | FATTR4_WORD0_SYMLINK_SUPPORT | - FATTR4_WORD0_ACLSUPPORT; + FATTR4_WORD0_ACLSUPPORT | + FATTR4_WORD0_CASE_INSENSITIVE | + FATTR4_WORD0_CASE_PRESERVING; if (minorversion) bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; @@ -3869,6 +3871,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f server->caps |= NFS_CAP_HARDLINKS; if (res.has_symlinks != 0) server->caps |= NFS_CAP_SYMLINKS; + if (res.case_insensitive) + server->caps |= NFS_CAP_CASE_INSENSITIVE; + if (res.case_preserving) + server->caps |= NFS_CAP_CASE_PRESERVING; #ifdef CONFIG_NFS_V4_SECURITY_LABEL if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) server->caps |= NFS_CAP_SECURITY_LABEL; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 801119b7a596..a9487c840052 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3533,6 +3533,42 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint return 0; } +static int decode_attr_case_insensitive(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) +{ + __be32 *p; + + *res = 0; + if (unlikely(bitmap[0] & (FATTR4_WORD0_CASE_INSENSITIVE - 1U))) + return -EIO; + if (likely(bitmap[0] & FATTR4_WORD0_CASE_INSENSITIVE)) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + *res = be32_to_cpup(p); + bitmap[0] &= ~FATTR4_WORD0_CASE_INSENSITIVE; + } + dprintk("%s: case_insensitive=%s\n", __func__, *res == 0 ? 
"false" : "true"); + return 0; +} + +static int decode_attr_case_preserving(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) +{ + __be32 *p; + + *res = 0; + if (unlikely(bitmap[0] & (FATTR4_WORD0_CASE_PRESERVING - 1U))) + return -EIO; + if (likely(bitmap[0] & FATTR4_WORD0_CASE_PRESERVING)) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + *res = be32_to_cpup(p); + bitmap[0] &= ~FATTR4_WORD0_CASE_PRESERVING; + } + dprintk("%s: case_preserving=%s\n", __func__, *res == 0 ? "false" : "true"); + return 0; +} + static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid) { __be32 *p; @@ -4413,6 +4449,10 @@ static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_re goto xdr_error; if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0) goto xdr_error; + if ((status = decode_attr_case_insensitive(xdr, bitmap, &res->case_insensitive)) != 0) + goto xdr_error; + if ((status = decode_attr_case_preserving(xdr, bitmap, &res->case_preserving)) != 0) + goto xdr_error; if ((status = decode_attr_exclcreat_supported(xdr, bitmap, res->exclcreat_bitmask)) != 0) goto xdr_error; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 2a9acbfe00f0..f24fc67af42d 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -271,6 +271,8 @@ struct nfs_server { #define NFS_CAP_ACLS (1U << 3) #define NFS_CAP_ATOMIC_OPEN (1U << 4) #define NFS_CAP_LGOPEN (1U << 5) +#define NFS_CAP_CASE_INSENSITIVE (1U << 6) +#define NFS_CAP_CASE_PRESERVING (1U << 7) #define NFS_CAP_POSIX_LOCK (1U << 14) #define NFS_CAP_UIDGID_NOMAP (1U << 15) #define NFS_CAP_STATEID_NFSV41 (1U << 16) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9102bdaa3faa..ddff92396a3f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1194,6 +1194,8 @@ struct nfs4_server_caps_res { u32 has_links; u32 has_symlinks; u32 fh_expire_type; + u32 case_insensitive; + u32 case_preserving; }; #define NFS4_PATHNAME_MAXCOMPONENTS 512 -- cgit v1.2.3-71-gd317 From e8c1f36157ce0bf8c150059c3f9f573c13a186df Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 10 Jan 2022 16:33:05 -0800 Subject: dma-buf-map: Fix dot vs comma in example Fix typo: separate arguments with comma rather than dot. Signed-off-by: Lucas De Marchi Signed-off-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20220111003305.1214667-1-lucas.demarchi@intel.com --- include/linux/dma-buf-map.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h index 278d489e4bdd..19fa0b5ae5ec 100644 --- a/include/linux/dma-buf-map.h +++ b/include/linux/dma-buf-map.h @@ -52,13 +52,13 @@ * * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf); * - * dma_buf_map_set_vaddr(&map. 0xdeadbeaf); + * dma_buf_map_set_vaddr(&map, 0xdeadbeaf); * * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem(). * * .. code-block:: c * - * dma_buf_map_set_vaddr_iomem(&map. 0xdeadbeaf); + * dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf); * * Instances of struct dma_buf_map do not have to be cleaned up, but * can be cleared to NULL with dma_buf_map_clear(). 
Cleared mappings -- cgit v1.2.3-71-gd317 From 8a59bb93b7e3cca389af44781a429ac12ac49be6 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Thu, 9 Dec 2021 14:53:30 -0500 Subject: NFSv4 store server support for fs_location attribute Define and store, as a capability, whether the server reports support for the fs_locations attribute. Signed-off-by: Olga Kornievskaia Signed-off-by: Anna Schumaker --- fs/nfs/nfs4proc.c | 2 ++ include/linux/nfs_fs_sb.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 44ab27f20f04..7d4c63282793 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3875,6 +3875,8 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) server->caps |= NFS_CAP_SECURITY_LABEL; #endif + if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS) + server->caps |= NFS_CAP_FS_LOCATIONS; if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index f24fc67af42d..8c08b356c8ca 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -289,5 +289,5 @@ struct nfs_server { #define NFS_CAP_COPY_NOTIFY (1U << 27) #define NFS_CAP_XATTR (1U << 28) #define NFS_CAP_READ_PLUS (1U << 29) - +#define NFS_CAP_FS_LOCATIONS (1U << 30) #endif -- cgit v1.2.3-71-gd317 From 1976b2b31462151403c9fc110204fcc2a77bdfd1 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Wed, 12 Jan 2022 10:27:38 -0500 Subject: NFSv4.1 query for fs_location attr on a new file system Query the server for other possible trunkable locations for a given file system on a 4.1+ mount. 
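A condensed sketch of how the capability bit added above gates the discovery, as the nfs_probe_fsinfo() hunk in the diff below does:

	/* Only ask for fs_locations when the server advertised the
	 * attribute and the protocol provides a discover_trunking
	 * method (NFSv4.1+).
	 */
	if (clp->rpc_ops->discover_trunking != NULL &&
	    (server->caps & NFS_CAP_FS_LOCATIONS))
		error = clp->rpc_ops->discover_trunking(server, mntfh);
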
v2: -- added missing static to nfs4_discover_trunking, reported by the kernel test robot Signed-off-by: Olga Kornievskaia Signed-off-by: Anna Schumaker --- fs/nfs/client.c | 7 +++++ fs/nfs/nfs4_fs.h | 9 +++--- fs/nfs/nfs4proc.c | 76 ++++++++++++++++++++++++++++++++++++++++++------- fs/nfs/nfs4state.c | 3 +- include/linux/nfs_xdr.h | 1 + 5 files changed, 81 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 1e4dc1ab9312..f7e39cc4472b 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -860,6 +860,13 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str server->namelen = pathinfo.max_namelen; } + if (clp->rpc_ops->discover_trunking != NULL && + (server->caps & NFS_CAP_FS_LOCATIONS)) { + error = clp->rpc_ops->discover_trunking(server, mntfh); + if (error < 0) + return error; + } + return 0; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 89076dd32334..166ea6112e61 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -260,8 +260,8 @@ struct nfs4_state_maintenance_ops { }; struct nfs4_mig_recovery_ops { - int (*get_locations)(struct inode *, struct nfs4_fs_locations *, - struct page *, const struct cred *); + int (*get_locations)(struct nfs_server *, struct nfs_fh *, + struct nfs4_fs_locations *, struct page *, const struct cred *); int (*fsid_present)(struct inode *, const struct cred *); }; @@ -302,8 +302,9 @@ extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait); extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *, struct nfs4_fs_locations *, struct page *); -extern int nfs4_proc_get_locations(struct inode *, struct nfs4_fs_locations *, - struct page *page, const struct cred *); +extern int nfs4_proc_get_locations(struct nfs_server *, struct nfs_fh *, + struct nfs4_fs_locations *, + struct page *page, const struct cred *); extern int nfs4_proc_fsid_present(struct inode *, const struct cred *); extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct dentry *, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 7d4c63282793..fc4629d2d2ab 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3935,6 +3935,60 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) return err; } +static int _nfs4_discover_trunking(struct nfs_server *server, + struct nfs_fh *fhandle) +{ + struct nfs4_fs_locations *locations = NULL; + struct page *page; + const struct cred *cred; + struct nfs_client *clp = server->nfs_client; + const struct nfs4_state_maintenance_ops *ops = + clp->cl_mvops->state_renewal_ops; + int status = -ENOMEM; + + cred = ops->get_state_renewal_cred(clp); + if (cred == NULL) { + cred = nfs4_get_clid_cred(clp); + if (cred == NULL) + return -ENOKEY; + } + + page = alloc_page(GFP_KERNEL); + locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); + if (page == NULL || locations == NULL) + goto out; + + status = nfs4_proc_get_locations(server, fhandle, locations, page, + cred); + if (status) + goto out; +out: + if (page) + __free_page(page); + kfree(locations); + return status; +} + +static int nfs4_discover_trunking(struct nfs_server *server, + struct nfs_fh *fhandle) +{ + struct nfs4_exception exception = { + .interruptible = true, + }; + struct nfs_client *clp = server->nfs_client; + int err = 0; + + if (!nfs4_has_session(clp)) + goto out; + do { + err = 
nfs4_handle_exception(server, + _nfs4_discover_trunking(server, fhandle), + &exception); + } while (exception.retry); +out: + return err; +} + static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info) { @@ -7823,18 +7877,18 @@ int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, * appended to this compound to identify the client ID which is * performing recovery. */ -static int _nfs40_proc_get_locations(struct inode *inode, +static int _nfs40_proc_get_locations(struct nfs_server *server, + struct nfs_fh *fhandle, struct nfs4_fs_locations *locations, struct page *page, const struct cred *cred) { - struct nfs_server *server = NFS_SERVER(inode); struct rpc_clnt *clnt = server->client; u32 bitmask[2] = { [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, }; struct nfs4_fs_locations_arg args = { .clientid = server->nfs_client->cl_clientid, - .fh = NFS_FH(inode), + .fh = fhandle, .page = page, .bitmask = bitmask, .migration = 1, /* skip LOOKUP */ @@ -7880,17 +7934,17 @@ static int _nfs40_proc_get_locations(struct inode *inode, * When the client supports GETATTR(fs_locations_info), it can * be plumbed in here. */ -static int _nfs41_proc_get_locations(struct inode *inode, +static int _nfs41_proc_get_locations(struct nfs_server *server, + struct nfs_fh *fhandle, struct nfs4_fs_locations *locations, struct page *page, const struct cred *cred) { - struct nfs_server *server = NFS_SERVER(inode); struct rpc_clnt *clnt = server->client; u32 bitmask[2] = { [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, }; struct nfs4_fs_locations_arg args = { - .fh = NFS_FH(inode), + .fh = fhandle, .page = page, .bitmask = bitmask, .migration = 1, /* skip LOOKUP */ @@ -7939,11 +7993,11 @@ static int _nfs41_proc_get_locations(struct inode *inode, * -NFS4ERR_LEASE_MOVED is returned if the server still has leases * from this client that require migration recovery. 
*/ -int nfs4_proc_get_locations(struct inode *inode, +int nfs4_proc_get_locations(struct nfs_server *server, + struct nfs_fh *fhandle, struct nfs4_fs_locations *locations, struct page *page, const struct cred *cred) { - struct nfs_server *server = NFS_SERVER(inode); struct nfs_client *clp = server->nfs_client; const struct nfs4_mig_recovery_ops *ops = clp->cl_mvops->mig_recovery_ops; @@ -7956,10 +8010,11 @@ int nfs4_proc_get_locations(struct inode *inode, (unsigned long long)server->fsid.major, (unsigned long long)server->fsid.minor, clp->cl_hostname); - nfs_display_fhandle(NFS_FH(inode), __func__); + nfs_display_fhandle(fhandle, __func__); do { - status = ops->get_locations(inode, locations, page, cred); + status = ops->get_locations(server, fhandle, locations, page, + cred); if (status != -NFS4ERR_DELAY) break; nfs4_handle_exception(server, status, &exception); @@ -10428,6 +10483,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .free_client = nfs4_free_client, .create_server = nfs4_create_server, .clone_server = nfs_clone_server, + .discover_trunking = nfs4_discover_trunking, }; static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index f3265575c28d..499bef9fe118 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2098,7 +2098,8 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred } inode = d_inode(server->super->s_root); - result = nfs4_proc_get_locations(inode, locations, page, cred); + result = nfs4_proc_get_locations(server, NFS_FH(inode), locations, + page, cred); if (result) { dprintk("<-- %s: failed to retrieve fs_locations: %d\n", __func__, result); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index ddff92396a3f..728cb0c1f0b6 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1797,6 +1797,7 @@ struct nfs_rpc_ops { struct nfs_server *(*create_server)(struct fs_context *); struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); + int (*discover_trunking)(struct nfs_server *, struct nfs_fh *); }; /* -- cgit v1.2.3-71-gd317 From d72d84aea4d57a735d8cfbade32ed323f47a5941 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Fri, 14 Jan 2022 16:37:42 +0800 Subject: locking/rwsem: drop redundant semicolon of down_write_nest_lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise, braces are needed when using it. Signed-off-by: Guchun Chen Acked-by: Christian König Acked-by: Peter Zijlstra (Intel) Link: https://patchwork.freedesktop.org/patch/msgid/20220114083742.6219-1-guchun.chen@amd.com Signed-off-by: Christian König --- include/linux/rwsem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index f9348769e558..efa5c324369a 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -230,7 +230,7 @@ extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map * do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ _down_write_nest_lock(sem, &(nest_lock)->dep_map); \ -} while (0); +} while (0) /* * Take/release a lock when not the owner will release it. -- cgit v1.2.3-71-gd317 From 09f5e7dc7ad705289e1b1ec065439aa3c42951c4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 20 Dec 2021 13:19:52 +0100 Subject: perf: Fix perf_event_read_local() time Time readers that cannot take locks (due to NMI etc..) 
currently make use of perf_event::shadow_ctx_time, which, for that event gives: time' = now + (time - timestamp) or, alternatively arranged: time' = time + (now - timestamp) IOW, the progression of time since the last time the shadow_ctx_time was updated. There are problems with this: A) the shadow_ctx_time is per-event, even though the ctx_time it reflects is obviously per context. The direct consequence of this is that the context needs to iterate all events all the time to keep the shadow_ctx_time in sync. B) even with the prior point, the context itself might not be active, meaning its time should not advance to begin with. C) shadow_ctx_time isn't consistently updated when ctx_time is. There are 3 users of this stuff that suffer differently from this: - calc_timer_values() - perf_output_read() - perf_event_update_userpage() /* A */ - perf_event_read_local() /* A,B */ In particular, perf_output_read() doesn't suffer at all, because it's sample driven and hence only relevant when the event is actually running. The same was supposed to be true for perf_event_update_userpage(), after all self-monitoring implies the context is active. *HOWEVER*, as per commit f79256532682 ("perf/core: fix userpage->time_enabled of inactive events") this goes wrong when combined with counter overcommit; in that case those events that do not get scheduled when the context becomes active (task events typically) miss out on the EVENT_TIME update and ENABLED time is inflated (for a little while) with the time the context was inactive. Once the event gets rotated in, this gets corrected, leading to a non-monotonic timeflow. perf_event_read_local() made things even worse: it can request time at any point, suffering all the problems perf_event_update_userpage() does and more. Because while perf_event_update_userpage() is limited by the context being active, perf_event_read_local() users have no such constraint. Therefore, completely overhaul things and do away with perf_event::shadow_ctx_time. Instead have regular context time updates keep track of this offset directly and provide perf_event_time_now() to complement perf_event_time(). perf_event_time_now() will, in addition to being context wide, also take into account whether the context is active. For an inactive context, it will not advance time. This latter property means the cgroup perf_cgroup_info context needs to grow additional state to track this. Additionally, since all this is strictly per-cpu, we can use barrier() to order context activity vs context time. Fixes: 7d9285e82db5 ("perf/bpf: Extend the perf_event_read_local() interface, a.k.a. "bpf: perf event change needed for subsequent bpf helpers"") Signed-off-by: Peter Zijlstra (Intel) Tested-by: Song Liu Tested-by: Namhyung Kim Link: https://lkml.kernel.org/r/YcB06DasOBtU0b00@hirez.programming.kicks-ass.net --- include/linux/perf_event.h | 15 +-- kernel/events/core.c | 246 +++++++++++++++++++++++++++------------------ 2 files changed, 149 insertions(+), 112 deletions(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 117f230bcdfd..733649184b27 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -693,18 +693,6 @@ struct perf_event { u64 total_time_running; u64 tstamp; - /* - * timestamp shadows the actual context timing but it can - * be safely used in NMI interrupt context. 
It reflects the - * context time as it was when the event was last scheduled in, - * or when ctx_sched_in failed to schedule the event because we - * run out of PMC. - * - * ctx_time already accounts for ctx->timestamp. Therefore to - * compute ctx_time for a sample, simply add perf_clock(). - */ - u64 shadow_ctx_time; - struct perf_event_attr attr; u16 header_size; u16 id_header_size; @@ -852,6 +840,7 @@ struct perf_event_context { */ u64 time; u64 timestamp; + u64 timeoffset; /* * These fields let us detect when two contexts have both @@ -934,6 +923,8 @@ struct bpf_perf_event_data_kern { struct perf_cgroup_info { u64 time; u64 timestamp; + u64 timeoffset; + int active; }; struct perf_cgroup { diff --git a/kernel/events/core.c b/kernel/events/core.c index fc18664f49b0..479c9e672ec4 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -674,6 +674,23 @@ perf_event_set_state(struct perf_event *event, enum perf_event_state state) WRITE_ONCE(event->state, state); } +/* + * UP store-release, load-acquire + */ + +#define __store_release(ptr, val) \ +do { \ + barrier(); \ + WRITE_ONCE(*(ptr), (val)); \ +} while (0) + +#define __load_acquire(ptr) \ +({ \ + __unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr)); \ + barrier(); \ + ___p; \ +}) + #ifdef CONFIG_CGROUP_PERF static inline bool @@ -719,34 +736,51 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event) return t->time; } -static inline void __update_cgrp_time(struct perf_cgroup *cgrp) +static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) { - struct perf_cgroup_info *info; - u64 now; - - now = perf_clock(); + struct perf_cgroup_info *t; - info = this_cpu_ptr(cgrp->info); + t = per_cpu_ptr(event->cgrp->info, event->cpu); + if (!__load_acquire(&t->active)) + return t->time; + now += READ_ONCE(t->timeoffset); + return now; +} - info->time += now - info->timestamp; +static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv) +{ + if (adv) + info->time += now - info->timestamp; info->timestamp = now; + /* + * see update_context_time() + */ + WRITE_ONCE(info->timeoffset, info->time - info->timestamp); } -static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) +static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final) { struct perf_cgroup *cgrp = cpuctx->cgrp; struct cgroup_subsys_state *css; + struct perf_cgroup_info *info; if (cgrp) { + u64 now = perf_clock(); + for (css = &cgrp->css; css; css = css->parent) { cgrp = container_of(css, struct perf_cgroup, css); - __update_cgrp_time(cgrp); + info = this_cpu_ptr(cgrp->info); + + __update_cgrp_time(info, now, true); + if (final) + __store_release(&info->active, 0); } } } static inline void update_cgrp_time_from_event(struct perf_event *event) { + struct perf_cgroup_info *info; struct perf_cgroup *cgrp; /* @@ -760,8 +794,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event) /* * Do not update time when cgroup is not active */ - if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) - __update_cgrp_time(event->cgrp); + if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) { + info = this_cpu_ptr(event->cgrp->info); + __update_cgrp_time(info, perf_clock(), true); + } } static inline void @@ -785,7 +821,8 @@ perf_cgroup_set_timestamp(struct task_struct *task, for (css = &cgrp->css; css; css = css->parent) { cgrp = container_of(css, struct perf_cgroup, css); info = this_cpu_ptr(cgrp->info); - info->timestamp = 
ctx->timestamp; + __update_cgrp_time(info, ctx->timestamp, false); + __store_release(&info->active, 1); } } @@ -981,14 +1018,6 @@ out: return ret; } -static inline void -perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) -{ - struct perf_cgroup_info *t; - t = per_cpu_ptr(event->cgrp->info, event->cpu); - event->shadow_ctx_time = now - t->timestamp; -} - static inline void perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) { @@ -1066,7 +1095,8 @@ static inline void update_cgrp_time_from_event(struct perf_event *event) { } -static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) +static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, + bool final) { } @@ -1098,12 +1128,12 @@ perf_cgroup_switch(struct task_struct *task, struct task_struct *next) { } -static inline void -perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) +static inline u64 perf_cgroup_event_time(struct perf_event *event) { + return 0; } -static inline u64 perf_cgroup_event_time(struct perf_event *event) +static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) { return 0; } @@ -1525,22 +1555,59 @@ static void perf_unpin_context(struct perf_event_context *ctx) /* * Update the record of the current time in a context. */ -static void update_context_time(struct perf_event_context *ctx) +static void __update_context_time(struct perf_event_context *ctx, bool adv) { u64 now = perf_clock(); - ctx->time += now - ctx->timestamp; + if (adv) + ctx->time += now - ctx->timestamp; ctx->timestamp = now; + + /* + * The above: time' = time + (now - timestamp), can be re-arranged + * into: time` = now + (time - timestamp), which gives a single value + * offset to compute future time without locks on. + * + * See perf_event_time_now(), which can be used from NMI context where + * it's (obviously) not possible to acquire ctx->lock in order to read + * both the above values in a consistent manner. + */ + WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp); +} + +static void update_context_time(struct perf_event_context *ctx) +{ + __update_context_time(ctx, true); } static u64 perf_event_time(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; + if (unlikely(!ctx)) + return 0; + if (is_cgroup_event(event)) return perf_cgroup_event_time(event); - return ctx ? 
ctx->time : 0; + return ctx->time; +} + +static u64 perf_event_time_now(struct perf_event *event, u64 now) +{ + struct perf_event_context *ctx = event->ctx; + + if (unlikely(!ctx)) + return 0; + + if (is_cgroup_event(event)) + return perf_cgroup_event_time_now(event, now); + + if (!(__load_acquire(&ctx->is_active) & EVENT_TIME)) + return ctx->time; + + now += READ_ONCE(ctx->timeoffset); + return now; } static enum event_type_t get_event_type(struct perf_event *event) @@ -2350,7 +2417,7 @@ __perf_remove_from_context(struct perf_event *event, if (ctx->is_active & EVENT_TIME) { update_context_time(ctx); - update_cgrp_time_from_cpuctx(cpuctx); + update_cgrp_time_from_cpuctx(cpuctx, false); } event_sched_out(event, cpuctx, ctx); @@ -2361,6 +2428,9 @@ __perf_remove_from_context(struct perf_event *event, list_del_event(event, ctx); if (!ctx->nr_events && ctx->is_active) { + if (ctx == &cpuctx->ctx) + update_cgrp_time_from_cpuctx(cpuctx, true); + ctx->is_active = 0; ctx->rotate_necessary = 0; if (ctx->task) { @@ -2482,40 +2552,6 @@ void perf_event_disable_inatomic(struct perf_event *event) irq_work_queue(&event->pending); } -static void perf_set_shadow_time(struct perf_event *event, - struct perf_event_context *ctx) -{ - /* - * use the correct time source for the time snapshot - * - * We could get by without this by leveraging the - * fact that to get to this function, the caller - * has most likely already called update_context_time() - * and update_cgrp_time_xx() and thus both timestamp - * are identical (or very close). Given that tstamp is, - * already adjusted for cgroup, we could say that: - * tstamp - ctx->timestamp - * is equivalent to - * tstamp - cgrp->timestamp. - * - * Then, in perf_output_read(), the calculation would - * work with no changes because: - * - event is guaranteed scheduled in - * - no scheduled out in between - * - thus the timestamp would be the same - * - * But this is a bit hairy. - * - * So instead, we have an explicit cgroup call to remain - * within the time source all along. We believe it - * is cleaner and simpler to understand. - */ - if (is_cgroup_event(event)) - perf_cgroup_set_shadow_time(event, event->tstamp); - else - event->shadow_ctx_time = event->tstamp - ctx->timestamp; -} - #define MAX_INTERRUPTS (~0ULL) static void perf_log_throttle(struct perf_event *event, int enable); @@ -2556,8 +2592,6 @@ event_sched_in(struct perf_event *event, perf_pmu_disable(event->pmu); - perf_set_shadow_time(event, ctx); - perf_log_itrace_start(event); if (event->pmu->add(event, PERF_EF_START)) { @@ -3251,16 +3285,6 @@ static void ctx_sched_out(struct perf_event_context *ctx, return; } - ctx->is_active &= ~event_type; - if (!(ctx->is_active & EVENT_ALL)) - ctx->is_active = 0; - - if (ctx->task) { - WARN_ON_ONCE(cpuctx->task_ctx != ctx); - if (!ctx->is_active) - cpuctx->task_ctx = NULL; - } - /* * Always update time if it was set; not only when it changes. 
* Otherwise we can 'forget' to update time for any but the last @@ -3274,7 +3298,22 @@ static void ctx_sched_out(struct perf_event_context *ctx, if (is_active & EVENT_TIME) { /* update (and stop) ctx time */ update_context_time(ctx); - update_cgrp_time_from_cpuctx(cpuctx); + update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx); + /* + * CPU-release for the below ->is_active store, + * see __load_acquire() in perf_event_time_now() + */ + barrier(); + } + + ctx->is_active &= ~event_type; + if (!(ctx->is_active & EVENT_ALL)) + ctx->is_active = 0; + + if (ctx->task) { + WARN_ON_ONCE(cpuctx->task_ctx != ctx); + if (!ctx->is_active) + cpuctx->task_ctx = NULL; } is_active ^= ctx->is_active; /* changed bits */ @@ -3711,13 +3750,19 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx, return 0; } +/* + * Because the userpage is strictly per-event (there is no concept of context, + * so there cannot be a context indirection), every userpage must be updated + * when context time starts :-( + * + * IOW, we must not miss EVENT_TIME edges. + */ static inline bool event_update_userpage(struct perf_event *event) { if (likely(!atomic_read(&event->mmap_count))) return false; perf_event_update_time(event); - perf_set_shadow_time(event, event->ctx); perf_event_update_userpage(event); return true; @@ -3801,13 +3846,23 @@ ctx_sched_in(struct perf_event_context *ctx, struct task_struct *task) { int is_active = ctx->is_active; - u64 now; lockdep_assert_held(&ctx->lock); if (likely(!ctx->nr_events)) return; + if (is_active ^ EVENT_TIME) { + /* start ctx time */ + __update_context_time(ctx, false); + perf_cgroup_set_timestamp(task, ctx); + /* + * CPU-release for the below ->is_active store, + * see __load_acquire() in perf_event_time_now() + */ + barrier(); + } + ctx->is_active |= (event_type | EVENT_TIME); if (ctx->task) { if (!is_active) @@ -3818,13 +3873,6 @@ ctx_sched_in(struct perf_event_context *ctx, is_active ^= ctx->is_active; /* changed bits */ - if (is_active & EVENT_TIME) { - /* start ctx time */ - now = perf_clock(); - ctx->timestamp = now; - perf_cgroup_set_timestamp(task, ctx); - } - /* * First go through the list and put on any pinned groups * in order to give them the best chance of going on. 
@@ -4418,6 +4466,18 @@ static inline u64 perf_event_count(struct perf_event *event) return local64_read(&event->count) + atomic64_read(&event->child_count); } +static void calc_timer_values(struct perf_event *event, + u64 *now, + u64 *enabled, + u64 *running) +{ + u64 ctx_time; + + *now = perf_clock(); + ctx_time = perf_event_time_now(event, *now); + __perf_update_times(event, ctx_time, enabled, running); +} + /* * NMI-safe method to read a local event, that is an event that * is: @@ -4477,10 +4537,9 @@ int perf_event_read_local(struct perf_event *event, u64 *value, *value = local64_read(&event->count); if (enabled || running) { - u64 now = event->shadow_ctx_time + perf_clock(); - u64 __enabled, __running; + u64 __enabled, __running, __now;; - __perf_update_times(event, now, &__enabled, &__running); + calc_timer_values(event, &__now, &__enabled, &__running); if (enabled) *enabled = __enabled; if (running) @@ -5802,18 +5861,6 @@ static int perf_event_index(struct perf_event *event) return event->pmu->event_idx(event); } -static void calc_timer_values(struct perf_event *event, - u64 *now, - u64 *enabled, - u64 *running) -{ - u64 ctx_time; - - *now = perf_clock(); - ctx_time = event->shadow_ctx_time + *now; - __perf_update_times(event, ctx_time, enabled, running); -} - static void perf_event_init_userpage(struct perf_event *event) { struct perf_event_mmap_page *userpg; @@ -6353,7 +6400,6 @@ accounting: ring_buffer_attach(event, rb); perf_event_update_time(event); - perf_set_shadow_time(event, event->ctx); perf_event_init_userpage(event); perf_event_update_userpage(event); } else { -- cgit v1.2.3-71-gd317 From a06247c6804f1a7c86a2e5398a4c1f1db1471848 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Tue, 11 Jan 2022 15:23:09 -0800 Subject: psi: Fix uaf issue when psi trigger is destroyed while being polled With a write operation on psi files replacing an old trigger with a new one, the lifetime of its waitqueue is totally arbitrary. Overwriting an existing trigger causes its waitqueue to be freed and a pending poll() will stumble on trigger->event_wait, which was destroyed. Fix this by disallowing redefinition of an existing psi trigger. If a write operation is used on a file descriptor with an already existing psi trigger, the operation will fail with an EBUSY error. Also bypass the check for psi_disabled in psi_trigger_destroy, as the flag can be flipped after the trigger is created, leading to a memory leak. Fixes: 0e94682b73bf ("psi: introduce psi monitor") Reported-by: syzbot+cdb5dd11c97cc532efad@syzkaller.appspotmail.com Suggested-by: Linus Torvalds Analyzed-by: Eric Biggers Signed-off-by: Suren Baghdasaryan Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Eric Biggers Acked-by: Johannes Weiner Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20220111232309.1786347-1-surenb@google.com --- Documentation/accounting/psi.rst | 3 +- include/linux/psi.h | 2 +- include/linux/psi_types.h | 3 -- kernel/cgroup/cgroup.c | 11 +++++-- kernel/sched/psi.c | 66 ++++++++++++++++++---------------------- 5 files changed, 40 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst index f2b3439edcc2..860fe651d645 100644 --- a/Documentation/accounting/psi.rst +++ b/Documentation/accounting/psi.rst @@ -92,7 +92,8 @@ Triggers can be set on more than one psi metric and more than one trigger for the same psi metric can be specified. 
However for each trigger a separate file descriptor is required to be able to poll it separately from others, therefore for each trigger a separate open() syscall should be made even -when opening the same psi interface file. +when opening the same psi interface file. Write operations to a file descriptor +with an already existing psi trigger will fail with EBUSY. Monitors activate only when system enters stall state for the monitored psi metric and deactivates upon exit from the stall state. While system is diff --git a/include/linux/psi.h b/include/linux/psi.h index a70ca833c6d7..f8ce53bfdb2a 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -33,7 +33,7 @@ void cgroup_move_task(struct task_struct *p, struct css_set *to); struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf, size_t nbytes, enum psi_res res); -void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t); +void psi_trigger_destroy(struct psi_trigger *t); __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file, poll_table *wait); diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index 516c0fe836fd..1a3cef26d129 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -141,9 +141,6 @@ struct psi_trigger { * events to one per window */ u64 last_event_time; - - /* Refcounting to prevent premature destruction */ - struct kref refcount; }; struct psi_group { diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index b31e1465868a..9d05c3ca2d5e 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -3643,6 +3643,12 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, cgroup_get(cgrp); cgroup_kn_unlock(of->kn); + /* Allow only one trigger per file descriptor */ + if (ctx->psi.trigger) { + cgroup_put(cgrp); + return -EBUSY; + } + psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi; new = psi_trigger_create(psi, buf, nbytes, res); if (IS_ERR(new)) { @@ -3650,8 +3656,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, return PTR_ERR(new); } - psi_trigger_replace(&ctx->psi.trigger, new); - + smp_store_release(&ctx->psi.trigger, new); cgroup_put(cgrp); return nbytes; @@ -3690,7 +3695,7 @@ static void cgroup_pressure_release(struct kernfs_open_file *of) { struct cgroup_file_ctx *ctx = of->priv; - psi_trigger_replace(&ctx->psi.trigger, NULL); + psi_trigger_destroy(ctx->psi.trigger); } bool cgroup_psi_enabled(void) diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index a679613a7cb7..c137c4d6983e 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1162,7 +1162,6 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, t->event = 0; t->last_event_time = 0; init_waitqueue_head(&t->event_wait); - kref_init(&t->refcount); mutex_lock(&group->trigger_lock); @@ -1191,15 +1190,19 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, return t; } -static void psi_trigger_destroy(struct kref *ref) +void psi_trigger_destroy(struct psi_trigger *t) { - struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount); - struct psi_group *group = t->group; + struct psi_group *group; struct task_struct *task_to_destroy = NULL; - if (static_branch_likely(&psi_disabled)) + /* + * We do not check psi_disabled since it might have been disabled after + * the trigger got created. + */ + if (!t) return; + group = t->group; /* * Wakeup waiters to stop polling. Can happen if cgroup is deleted * from under a polling process. 
@@ -1235,9 +1238,9 @@ static void psi_trigger_destroy(struct kref *ref) mutex_unlock(&group->trigger_lock); /* - * Wait for both *trigger_ptr from psi_trigger_replace and - * poll_task RCUs to complete their read-side critical sections - * before destroying the trigger and optionally the poll_task + * Wait for psi_schedule_poll_work RCU to complete its read-side + * critical section before destroying the trigger and optionally the + * poll_task. */ synchronize_rcu(); /* @@ -1254,18 +1257,6 @@ static void psi_trigger_destroy(struct kref *ref) kfree(t); } -void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new) -{ - struct psi_trigger *old = *trigger_ptr; - - if (static_branch_likely(&psi_disabled)) - return; - - rcu_assign_pointer(*trigger_ptr, new); - if (old) - kref_put(&old->refcount, psi_trigger_destroy); -} - __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file, poll_table *wait) { @@ -1275,24 +1266,15 @@ __poll_t psi_trigger_poll(void **trigger_ptr, if (static_branch_likely(&psi_disabled)) return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI; - rcu_read_lock(); - - t = rcu_dereference(*(void __rcu __force **)trigger_ptr); - if (!t) { - rcu_read_unlock(); + t = smp_load_acquire(trigger_ptr); + if (!t) return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI; - } - kref_get(&t->refcount); - - rcu_read_unlock(); poll_wait(file, &t->event_wait, wait); if (cmpxchg(&t->event, 1, 0) == 1) ret |= EPOLLPRI; - kref_put(&t->refcount, psi_trigger_destroy); - return ret; } @@ -1316,14 +1298,24 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf, buf[buf_size - 1] = '\0'; - new = psi_trigger_create(&psi_system, buf, nbytes, res); - if (IS_ERR(new)) - return PTR_ERR(new); - seq = file->private_data; + /* Take seq->lock to protect seq->private from concurrent writes */ mutex_lock(&seq->lock); - psi_trigger_replace(&seq->private, new); + + /* Allow only one trigger per file descriptor */ + if (seq->private) { + mutex_unlock(&seq->lock); + return -EBUSY; + } + + new = psi_trigger_create(&psi_system, buf, nbytes, res); + if (IS_ERR(new)) { + mutex_unlock(&seq->lock); + return PTR_ERR(new); + } + + smp_store_release(&seq->private, new); mutex_unlock(&seq->lock); return nbytes; @@ -1358,7 +1350,7 @@ static int psi_fop_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; - psi_trigger_replace(&seq->private, NULL); + psi_trigger_destroy(seq->private); return single_release(inode, file); } -- cgit v1.2.3-71-gd317 From 0e3872499de1a1230cef5221607d71aa09264bd5 Mon Sep 17 00:00:00 2001 From: Hui Su Date: Fri, 7 Jan 2022 17:52:54 +0800 Subject: kernel/sched: Remove dl_boosted flag comment since commit 2279f540ea7d ("sched/deadline: Fix priority inheritance with multiple scheduling classes"), we should not keep it here. Signed-off-by: Hui Su Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Daniel Bristot de Oliveira Link: https://lore.kernel.org/r/20220107095254.GA49258@localhost.localdomain --- include/linux/sched.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 7a1f16df66e3..7f8b4495e243 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -614,10 +614,6 @@ struct sched_dl_entity { * task has to wait for a replenishment to be performed at the * next firing of dl_timer. * - * @dl_boosted tells if we are boosted due to DI. 
If so we are - outside bandwidth enforcement mechanism (but only until we - exit the critical section); - * - * @dl_yielded tells if task gave up the CPU before consuming - * all its available runtime during the last job. - * -- cgit v1.2.3-71-gd317 From dee872e124e8d5de22b68c58f6f6c3f5e8889160 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Fri, 14 Jan 2022 22:09:45 +0530 Subject: bpf: Populate kfunc BTF ID sets in struct btf This patch prepares the kernel to support putting all kinds of kfunc BTF ID sets in the struct btf itself. The various kernel subsystems will make the register_btf_kfunc_id_set call in their initcalls (for built-in code and modules). The 'hook' is one of the many program types, e.g. XDP, TC/SCHED_CLS, STRUCT_OPS, and the 'types' are check (allowed or not), acquire, release, and ret_null (with PTR_TO_BTF_ID_OR_NULL return type). A maximum of BTF_KFUNC_SET_MAX_CNT (32) kfunc BTF IDs are permitted in a set of a certain hook and type for vmlinux sets, since they are allocated on demand, and otherwise set as NULL. Module sets can only be registered once per hook and type, hence they are directly assigned. A new btf_kfunc_id_set_contains function is exposed for use in the verifier; this new method is faster than the existing list-searching method, and is also automatic. It also lets other code not care whether the set is unallocated or not. Note that module code can only do a single register_btf_kfunc_id_set call per hook. This is why sorting is only done for in-kernel vmlinux sets, because there might be multiple sets for the same hook and type that must be concatenated, hence sorting them is required to ensure bsearch in btf_id_set_contains continues to work correctly. The next commit will update the kernel users to make use of this infrastructure. Finally, add the __maybe_unused annotation for BTF ID macros for the !CONFIG_DEBUG_INFO_BTF case, so that they don't produce warnings at build time. The previous patch is also needed to provide synchronization against initialization for the module BTF's kfunc_set_tab introduced here, as described below: The kfunc_set_tab pointer in struct btf is write-once (if we consider the registration phase (comprised of multiple register_btf_kfunc_id_set calls) as a single operation). In this sense, once it has been fully prepared, it isn't modified, only used for lookup (from the verifier context). For btf_vmlinux, it is initialized fully during the do_initcalls phase, which happens fairly early in the boot process, before any processes are present. This also eliminates the possibility of bpf_check being called at that point, thus relieving us of ensuring any synchronization between the registration and lookup function (btf_kfunc_id_set_contains). However, the case for module BTF is a bit tricky. The BTF is parsed, prepared, and published from the MODULE_STATE_COMING notifier callback. After this, the module initcalls are invoked, where our registration function will be called to populate the kfunc_set_tab for the module BTF. At this point, BTF may be available to userspace while its corresponding module is still initializing. A BTF fd can then be passed to the verifier using the bpf syscall (e.g. for a kfunc call insn). Hence, there is a race window where the verifier may concurrently try to look up the kfunc_set_tab. To prevent this race, we must either ensure the operations are serialized or wait for the __init functions to complete.
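For reference, the registration flow this patch enables looks roughly like the sketch below (the kfunc and all 'example' names are made up for illustration; the macros, the btf_kfunc_id_set struct and register_btf_kfunc_id_set itself are from this series):

	/* needs <linux/btf.h> and <linux/btf_ids.h> */
	BTF_SET_START(example_check_kfunc_ids)
	BTF_ID(func, bpf_example_kfunc)		/* hypothetical kfunc */
	BTF_SET_END(example_check_kfunc_ids)

	static const struct btf_kfunc_id_set example_kfunc_set = {
		.owner     = THIS_MODULE,
		.check_set = &example_check_kfunc_ids,
	};

	static int __init example_init(void)
	{
		/* called from an initcall, for built-in code and modules alike */
		return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP,
						 &example_kfunc_set);
	}
	late_initcall(example_init);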
In the earlier registration API, this race was alleviated because the verifier's bpf_check_mod_kfunc_call didn't find the kfunc BTF ID until it was added by the registration function (usually called at the end of the module __init function, after all module resources have been initialized). If the verifier made the check_kfunc_call before the kfunc BTF ID was added to the list, it would fail verification (saying the call isn't allowed). Access to the list was protected by a mutex. Now, it would still fail verification, but for a different reason (returning ENXIO due to the failed btf_try_get_module call in add_kfunc_call), because if the __init call is in progress the module will be in the middle of the MODULE_STATE_COMING -> MODULE_STATE_LIVE transition, and the BTF_MODULE_LIVE flag for the btf_module instance will not be set, so the btf_try_get_module call will fail. Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20220114163953.1455836-3-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/btf.h | 39 ++++++++ include/linux/btf_ids.h | 13 +-- kernel/bpf/btf.c | 244 +++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 289 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/btf.h b/include/linux/btf.h index 0c74348cbc9d..c451f8e2612a 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -12,11 +12,33 @@ #define BTF_TYPE_EMIT(type) ((void)(type *)0) #define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val) +enum btf_kfunc_type { + BTF_KFUNC_TYPE_CHECK, + BTF_KFUNC_TYPE_ACQUIRE, + BTF_KFUNC_TYPE_RELEASE, + BTF_KFUNC_TYPE_RET_NULL, + BTF_KFUNC_TYPE_MAX, +}; + struct btf; struct btf_member; struct btf_type; union bpf_attr; struct btf_show; +struct btf_id_set; + +struct btf_kfunc_id_set { + struct module *owner; + union { + struct { + struct btf_id_set *check_set; + struct btf_id_set *acquire_set; + struct btf_id_set *release_set; + struct btf_id_set *ret_null_set; + }; + struct btf_id_set *sets[BTF_KFUNC_TYPE_MAX]; + }; +}; extern const struct file_operations btf_fops; @@ -307,6 +329,11 @@ const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); const char *btf_name_by_offset(const struct btf *btf, u32 offset); struct btf *btf_parse_vmlinux(void); struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); +bool btf_kfunc_id_set_contains(const struct btf *btf, + enum bpf_prog_type prog_type, + enum btf_kfunc_type type, u32 kfunc_btf_id); +int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, + const struct btf_kfunc_id_set *s); #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) @@ -318,6 +345,18 @@ static inline const char *btf_name_by_offset(const struct btf *btf, { return NULL; } +static inline bool btf_kfunc_id_set_contains(const struct btf *btf, + enum bpf_prog_type prog_type, + enum btf_kfunc_type type, + u32 kfunc_btf_id) +{ + return false; +} +static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, + const struct btf_kfunc_id_set *s) +{ + return 0; +} #endif struct kfunc_btf_id_set { diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 919c0fde1c51..bc5d9cc34e4c 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -11,6 +11,7 @@ struct btf_id_set { #ifdef CONFIG_DEBUG_INFO_BTF #include <linux/compiler.h> /* for __PASTE */ +#include <linux/compiler_attributes.h> /* for __maybe_unused */ /* * Following macros help to define lists of BTF IDs placed @@ -146,14 +147,14 @@ extern struct btf_id_set name; #else -#define BTF_ID_LIST(name) static u32 name[5];
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[5]; #define BTF_ID(prefix, name) #define BTF_ID_UNUSED -#define BTF_ID_LIST_GLOBAL(name, n) u32 name[n]; -#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1]; -#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1]; -#define BTF_SET_START(name) static struct btf_id_set name = { 0 }; -#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 }; +#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n]; +#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1]; +#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1]; +#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 }; +#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 }; #define BTF_SET_END(name) #endif /* CONFIG_DEBUG_INFO_BTF */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index f25bca59909d..74037bd65d17 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -198,6 +198,21 @@ DEFINE_IDR(btf_idr); DEFINE_SPINLOCK(btf_idr_lock); +enum btf_kfunc_hook { + BTF_KFUNC_HOOK_XDP, + BTF_KFUNC_HOOK_TC, + BTF_KFUNC_HOOK_STRUCT_OPS, + BTF_KFUNC_HOOK_MAX, +}; + +enum { + BTF_KFUNC_SET_MAX_CNT = 32, +}; + +struct btf_kfunc_set_tab { + struct btf_id_set *sets[BTF_KFUNC_HOOK_MAX][BTF_KFUNC_TYPE_MAX]; +}; + struct btf { void *data; struct btf_type **types; @@ -212,6 +227,7 @@ struct btf { refcount_t refcnt; u32 id; struct rcu_head rcu; + struct btf_kfunc_set_tab *kfunc_set_tab; /* split BTF support */ struct btf *base_btf; @@ -1531,8 +1547,30 @@ static void btf_free_id(struct btf *btf) spin_unlock_irqrestore(&btf_idr_lock, flags); } +static void btf_free_kfunc_set_tab(struct btf *btf) +{ + struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab; + int hook, type; + + if (!tab) + return; + /* For module BTF, we directly assign the sets being registered, so + * there is nothing to free except kfunc_set_tab. + */ + if (btf_is_module(btf)) + goto free_tab; + for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) { + for (type = 0; type < ARRAY_SIZE(tab->sets[0]); type++) + kfree(tab->sets[hook][type]); + } +free_tab: + kfree(tab); + btf->kfunc_set_tab = NULL; +} + static void btf_free(struct btf *btf) { + btf_free_kfunc_set_tab(btf); kvfree(btf->types); kvfree(btf->resolved_sizes); kvfree(btf->resolved_ids); @@ -6371,6 +6409,36 @@ struct module *btf_try_get_module(const struct btf *btf) return res; } +/* Returns struct btf corresponding to the struct module + * + * This function can return NULL or ERR_PTR. Note that caller must + * release reference for struct btf iff btf_is_module is true. 
+ */ +static struct btf *btf_get_module_btf(const struct module *module) +{ + struct btf *btf = NULL; +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + struct btf_module *btf_mod, *tmp; +#endif + + if (!module) + return bpf_get_btf_vmlinux(); +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + mutex_lock(&btf_module_mutex); + list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { + if (btf_mod->module != module) + continue; + + btf_get(btf_mod->btf); + btf = btf_mod->btf; + break; + } + mutex_unlock(&btf_module_mutex); +#endif + + return btf; +} + BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) { struct btf *btf; @@ -6438,7 +6506,181 @@ BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE) BTF_TRACING_TYPE_xxx #undef BTF_TRACING_TYPE -/* BTF ID set registration API for modules */ +/* Kernel Function (kfunc) BTF ID set registration API */ + +static int __btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, + enum btf_kfunc_type type, + struct btf_id_set *add_set, bool vmlinux_set) +{ + struct btf_kfunc_set_tab *tab; + struct btf_id_set *set; + u32 set_cnt; + int ret; + + if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) { + ret = -EINVAL; + goto end; + } + + if (!add_set->cnt) + return 0; + + tab = btf->kfunc_set_tab; + if (!tab) { + tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN); + if (!tab) + return -ENOMEM; + btf->kfunc_set_tab = tab; + } + + set = tab->sets[hook][type]; + /* Warn when register_btf_kfunc_id_set is called twice for the same hook + * for module sets. + */ + if (WARN_ON_ONCE(set && !vmlinux_set)) { + ret = -EINVAL; + goto end; + } + + /* We don't need to allocate, concatenate, and sort module sets, because + * only one is allowed per hook. Hence, we can directly assign the + * pointer and return. + */ + if (!vmlinux_set) { + tab->sets[hook][type] = add_set; + return 0; + } + + /* In case of vmlinux sets, there may be more than one set being + * registered per hook. To create a unified set, we allocate a new set + * and concatenate all individual sets being registered. While each set + * is individually sorted, they may become unsorted when concatenated, + * hence re-sorting the final set again is required to make binary + * searching the set using btf_id_set_contains function work. + */ + set_cnt = set ? 
set->cnt : 0; + + if (set_cnt > U32_MAX - add_set->cnt) { + ret = -EOVERFLOW; + goto end; + } + + if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) { + ret = -E2BIG; + goto end; + } + + /* Grow set */ + set = krealloc(tab->sets[hook][type], + offsetof(struct btf_id_set, ids[set_cnt + add_set->cnt]), + GFP_KERNEL | __GFP_NOWARN); + if (!set) { + ret = -ENOMEM; + goto end; + } + + /* For newly allocated set, initialize set->cnt to 0 */ + if (!tab->sets[hook][type]) + set->cnt = 0; + tab->sets[hook][type] = set; + + /* Concatenate the two sets */ + memcpy(set->ids + set->cnt, add_set->ids, add_set->cnt * sizeof(set->ids[0])); + set->cnt += add_set->cnt; + + sort(set->ids, set->cnt, sizeof(set->ids[0]), btf_id_cmp_func, NULL); + + return 0; +end: + btf_free_kfunc_set_tab(btf); + return ret; +} + +static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, + const struct btf_kfunc_id_set *kset) +{ + bool vmlinux_set = !btf_is_module(btf); + int type, ret; + + for (type = 0; type < ARRAY_SIZE(kset->sets); type++) { + if (!kset->sets[type]) + continue; + + ret = __btf_populate_kfunc_set(btf, hook, type, kset->sets[type], vmlinux_set); + if (ret) + break; + } + return ret; +} + +static bool __btf_kfunc_id_set_contains(const struct btf *btf, + enum btf_kfunc_hook hook, + enum btf_kfunc_type type, + u32 kfunc_btf_id) +{ + struct btf_id_set *set; + + if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) + return false; + if (!btf->kfunc_set_tab) + return false; + set = btf->kfunc_set_tab->sets[hook][type]; + if (!set) + return false; + return btf_id_set_contains(set, kfunc_btf_id); +} + +static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) +{ + switch (prog_type) { + case BPF_PROG_TYPE_XDP: + return BTF_KFUNC_HOOK_XDP; + case BPF_PROG_TYPE_SCHED_CLS: + return BTF_KFUNC_HOOK_TC; + case BPF_PROG_TYPE_STRUCT_OPS: + return BTF_KFUNC_HOOK_STRUCT_OPS; + default: + return BTF_KFUNC_HOOK_MAX; + } +} + +/* Caution: + * Reference to the module (obtained using btf_try_get_module) corresponding to + * the struct btf *MUST* be held when calling this function from verifier + * context. This is usually true as we stash references in prog's kfunc_btf_tab; + * keeping the reference for the duration of the call provides the necessary + * protection for looking up a well-formed btf->kfunc_set_tab. + */ +bool btf_kfunc_id_set_contains(const struct btf *btf, + enum bpf_prog_type prog_type, + enum btf_kfunc_type type, u32 kfunc_btf_id) +{ + enum btf_kfunc_hook hook; + + hook = bpf_prog_type_to_kfunc_hook(prog_type); + return __btf_kfunc_id_set_contains(btf, hook, type, kfunc_btf_id); +} + +/* This function must be invoked only from initcalls/module init functions */ +int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, + const struct btf_kfunc_id_set *kset) +{ + enum btf_kfunc_hook hook; + struct btf *btf; + int ret; + + btf = btf_get_module_btf(kset->owner); + if (IS_ERR_OR_NULL(btf)) + return btf ? 
PTR_ERR(btf) : -ENOENT; + + hook = bpf_prog_type_to_kfunc_hook(prog_type); + ret = btf_populate_kfunc_set(btf, hook, kset); + /* reference is only taken for module BTF */ + if (btf_is_module(btf)) + btf_put(btf); + return ret; +} +EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set); #ifdef CONFIG_DEBUG_INFO_BTF_MODULES -- cgit v1.2.3-71-gd317 From b202d84422223b7222cba5031d182f20b37e146e Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Fri, 14 Jan 2022 22:09:46 +0530 Subject: bpf: Remove check_kfunc_call callback and old kfunc BTF ID API Completely remove the old code for check_kfunc_call to help it work with modules, and also the callback itself. The previous commit adds infrastructure to register all sets and put them in vmlinux or module BTF, and concatenates all related sets organized by the hook and the type. Once populated, these sets remain immutable for the lifetime of the struct btf. Also, since we don't need the 'owner' module anywhere when doing check_kfunc_call, drop the 'btf_modp' module parameter from find_kfunc_desc_btf. Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20220114163953.1455836-4-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 8 ---- include/linux/btf.h | 44 --------------------- kernel/bpf/btf.c | 46 ---------------------- kernel/bpf/verifier.c | 20 ++++------ net/bpf/test_run.c | 23 ++++++----- net/core/filter.c | 1 - net/ipv4/bpf_tcp_ca.c | 22 ++++++----- net/ipv4/tcp_bbr.c | 18 +++++---- net/ipv4/tcp_cubic.c | 17 ++++---- net/ipv4/tcp_dctcp.c | 18 +++++---- .../selftests/bpf/bpf_testmod/bpf_testmod.c | 17 ++++---- 11 files changed, 73 insertions(+), 161 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6e947cd91152..6d7346c54d83 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -573,7 +573,6 @@ struct bpf_verifier_ops { const struct btf_type *t, int off, int size, enum bpf_access_type atype, u32 *next_btf_id); - bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner); }; struct bpf_prog_offload_ops { @@ -1719,7 +1718,6 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); -bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner); bool btf_ctx_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info); @@ -1971,12 +1969,6 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, return -ENOTSUPP; } -static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, - struct module *owner) -{ - return false; -} - static inline void bpf_map_put(struct bpf_map *map) { } diff --git a/include/linux/btf.h b/include/linux/btf.h index c451f8e2612a..b12cfe3b12bb 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -359,48 +359,4 @@ static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, } #endif -struct kfunc_btf_id_set { - struct list_head list; - struct btf_id_set *set; - struct module *owner; -}; - -struct kfunc_btf_id_list { - struct list_head list; - struct mutex mutex; -}; - -#ifdef CONFIG_DEBUG_INFO_BTF_MODULES -void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s); -void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s); -bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, - struct module *owner); - -extern struct 
kfunc_btf_id_list bpf_tcp_ca_kfunc_list; -extern struct kfunc_btf_id_list prog_test_kfunc_list; -#else -static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s) -{ -} -static inline void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s) -{ -} -static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, - u32 kfunc_id, struct module *owner) -{ - return false; -} - -static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused; -static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused; -#endif - -#define DEFINE_KFUNC_BTF_ID_SET(set, name) \ - struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \ - THIS_MODULE } - #endif diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 74037bd65d17..4be5cf629ca9 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6682,52 +6682,6 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, } EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set); -#ifdef CONFIG_DEBUG_INFO_BTF_MODULES - -void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s) -{ - mutex_lock(&l->mutex); - list_add(&s->list, &l->list); - mutex_unlock(&l->mutex); -} -EXPORT_SYMBOL_GPL(register_kfunc_btf_id_set); - -void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s) -{ - mutex_lock(&l->mutex); - list_del_init(&s->list); - mutex_unlock(&l->mutex); -} -EXPORT_SYMBOL_GPL(unregister_kfunc_btf_id_set); - -bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, - struct module *owner) -{ - struct kfunc_btf_id_set *s; - - mutex_lock(&klist->mutex); - list_for_each_entry(s, &klist->list, list) { - if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) { - mutex_unlock(&klist->mutex); - return true; - } - } - mutex_unlock(&klist->mutex); - return false; -} - -#define DEFINE_KFUNC_BTF_ID_LIST(name) \ - struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list), \ - __MUTEX_INITIALIZER(name.mutex) }; \ - EXPORT_SYMBOL_GPL(name) - -DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list); -DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list); - -#endif - int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bfb45381fb3f..72802c1eb5ac 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1741,7 +1741,7 @@ find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) } static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, - s16 offset, struct module **btf_modp) + s16 offset) { struct bpf_kfunc_btf kf_btf = { .offset = offset }; struct bpf_kfunc_btf_tab *tab; @@ -1795,8 +1795,6 @@ static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), kfunc_btf_cmp_by_off, NULL); } - if (btf_modp) - *btf_modp = b->module; return b->btf; } @@ -1813,8 +1811,7 @@ void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) } static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, - u32 func_id, s16 offset, - struct module **btf_modp) + u32 func_id, s16 offset) { if (offset) { if (offset < 0) { @@ -1825,7 +1822,7 @@ static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, return ERR_PTR(-EINVAL); } - return __find_kfunc_desc_btf(env, offset, btf_modp); + return __find_kfunc_desc_btf(env, offset); } return btf_vmlinux ?: ERR_PTR(-ENOENT); } @@ -1888,7 
+1885,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) prog_aux->kfunc_btf_tab = btf_tab; } - desc_btf = find_kfunc_desc_btf(env, func_id, offset, NULL); + desc_btf = find_kfunc_desc_btf(env, func_id, offset); if (IS_ERR(desc_btf)) { verbose(env, "failed to find BTF for kernel function\n"); return PTR_ERR(desc_btf); @@ -2349,7 +2346,7 @@ static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) return NULL; - desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off, NULL); + desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off); if (IS_ERR(desc_btf)) return ""; @@ -6820,7 +6817,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) struct bpf_reg_state *regs = cur_regs(env); const char *func_name, *ptr_type_name; u32 i, nargs, func_id, ptr_type_id; - struct module *btf_mod = NULL; const struct btf_param *args; struct btf *desc_btf; int err; @@ -6829,7 +6825,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) if (!insn->imm) return 0; - desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off, &btf_mod); + desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off); if (IS_ERR(desc_btf)) return PTR_ERR(desc_btf); @@ -6838,8 +6834,8 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) func_name = btf_name_by_offset(desc_btf, func->name_off); func_proto = btf_type_by_id(desc_btf, func->type); - if (!env->ops->check_kfunc_call || - !env->ops->check_kfunc_call(func_id, btf_mod)) { + if (!btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), + BTF_KFUNC_TYPE_CHECK, func_id)) { verbose(env, "calling kernel function %s is not allowed\n", func_name); return -EACCES; diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 46dd95755967..7796a8c747a0 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -236,18 +237,11 @@ __diag_pop(); ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO); -BTF_SET_START(test_sk_kfunc_ids) +BTF_SET_START(test_sk_check_kfunc_ids) BTF_ID(func, bpf_kfunc_call_test1) BTF_ID(func, bpf_kfunc_call_test2) BTF_ID(func, bpf_kfunc_call_test3) -BTF_SET_END(test_sk_kfunc_ids) - -bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner) -{ - if (btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id)) - return true; - return bpf_check_mod_kfunc_call(&prog_test_kfunc_list, kfunc_id, owner); -} +BTF_SET_END(test_sk_check_kfunc_ids) static void *bpf_test_init(const union bpf_attr *kattr, u32 size, u32 headroom, u32 tailroom) @@ -1067,3 +1061,14 @@ out: kfree(ctx); return err; } + +static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = { + .owner = THIS_MODULE, + .check_set = &test_sk_check_kfunc_ids, +}; + +static int __init bpf_prog_test_run_init(void) +{ + return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set); +} +late_initcall(bpf_prog_test_run_init); diff --git a/net/core/filter.c b/net/core/filter.c index 4603b7cd3cd1..f73a84c75970 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -10062,7 +10062,6 @@ const struct bpf_verifier_ops tc_cls_act_verifier_ops = { .convert_ctx_access = tc_cls_act_convert_ctx_access, .gen_prologue = tc_cls_act_prologue, .gen_ld_abs = bpf_gen_ld_abs, - .check_kfunc_call = bpf_prog_test_check_kfunc_call, }; const struct bpf_prog_ops tc_cls_act_prog_ops = { diff --git a/net/ipv4/bpf_tcp_ca.c 
b/net/ipv4/bpf_tcp_ca.c index de610cb83694..b60c9fd7147e 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ +#include #include #include #include @@ -212,26 +213,23 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id, } } -BTF_SET_START(bpf_tcp_ca_kfunc_ids) +BTF_SET_START(bpf_tcp_ca_check_kfunc_ids) BTF_ID(func, tcp_reno_ssthresh) BTF_ID(func, tcp_reno_cong_avoid) BTF_ID(func, tcp_reno_undo_cwnd) BTF_ID(func, tcp_slow_start) BTF_ID(func, tcp_cong_avoid_ai) -BTF_SET_END(bpf_tcp_ca_kfunc_ids) +BTF_SET_END(bpf_tcp_ca_check_kfunc_ids) -static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id, struct module *owner) -{ - if (btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id)) - return true; - return bpf_check_mod_kfunc_call(&bpf_tcp_ca_kfunc_list, kfunc_btf_id, owner); -} +static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = { + .owner = THIS_MODULE, + .check_set = &bpf_tcp_ca_check_kfunc_ids, +}; static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = { .get_func_proto = bpf_tcp_ca_get_func_proto, .is_valid_access = bpf_tcp_ca_is_valid_access, .btf_struct_access = bpf_tcp_ca_btf_struct_access, - .check_kfunc_call = bpf_tcp_ca_check_kfunc_call, }; static int bpf_tcp_ca_init_member(const struct btf_type *t, @@ -300,3 +298,9 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = { .init = bpf_tcp_ca_init, .name = "tcp_congestion_ops", }; + +static int __init bpf_tcp_ca_kfunc_init(void) +{ + return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set); +} +late_initcall(bpf_tcp_ca_kfunc_init); diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index ec5550089b4d..02e8626ccb27 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -1154,7 +1154,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = { .set_state = bbr_set_state, }; -BTF_SET_START(tcp_bbr_kfunc_ids) +BTF_SET_START(tcp_bbr_check_kfunc_ids) #ifdef CONFIG_X86 #ifdef CONFIG_DYNAMIC_FTRACE BTF_ID(func, bbr_init) @@ -1167,25 +1167,27 @@ BTF_ID(func, bbr_min_tso_segs) BTF_ID(func, bbr_set_state) #endif #endif -BTF_SET_END(tcp_bbr_kfunc_ids) +BTF_SET_END(tcp_bbr_check_kfunc_ids) -static DEFINE_KFUNC_BTF_ID_SET(&tcp_bbr_kfunc_ids, tcp_bbr_kfunc_btf_set); +static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = { + .owner = THIS_MODULE, + .check_set = &tcp_bbr_check_kfunc_ids, +}; static int __init bbr_register(void) { int ret; BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE); - ret = tcp_register_congestion_control(&tcp_bbr_cong_ops); - if (ret) + + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &tcp_bbr_kfunc_set); + if (ret < 0) return ret; - register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set); - return 0; + return tcp_register_congestion_control(&tcp_bbr_cong_ops); } static void __exit bbr_unregister(void) { - unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set); tcp_unregister_congestion_control(&tcp_bbr_cong_ops); } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index e07837e23b3f..24d562dd6225 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -485,7 +485,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = { .name = "cubic", }; -BTF_SET_START(tcp_cubic_kfunc_ids) +BTF_SET_START(tcp_cubic_check_kfunc_ids) #ifdef CONFIG_X86 #ifdef CONFIG_DYNAMIC_FTRACE BTF_ID(func, cubictcp_init) @@ -496,9 +496,12 @@ BTF_ID(func, cubictcp_cwnd_event) BTF_ID(func, cubictcp_acked) #endif #endif 
-BTF_SET_END(tcp_cubic_kfunc_ids) +BTF_SET_END(tcp_cubic_check_kfunc_ids) -static DEFINE_KFUNC_BTF_ID_SET(&tcp_cubic_kfunc_ids, tcp_cubic_kfunc_btf_set); +static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = { + .owner = THIS_MODULE, + .check_set = &tcp_cubic_check_kfunc_ids, +}; static int __init cubictcp_register(void) { @@ -534,16 +537,14 @@ static int __init cubictcp_register(void) /* divide by bic_scale and by constant Srtt (100ms) */ do_div(cube_factor, bic_scale * 10); - ret = tcp_register_congestion_control(&cubictcp); - if (ret) + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &tcp_cubic_kfunc_set); + if (ret < 0) return ret; - register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set); - return 0; + return tcp_register_congestion_control(&cubictcp); } static void __exit cubictcp_unregister(void) { - unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set); tcp_unregister_congestion_control(&cubictcp); } diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 0d7ab3cc7b61..1943a6630341 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -238,7 +238,7 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = { .name = "dctcp-reno", }; -BTF_SET_START(tcp_dctcp_kfunc_ids) +BTF_SET_START(tcp_dctcp_check_kfunc_ids) #ifdef CONFIG_X86 #ifdef CONFIG_DYNAMIC_FTRACE BTF_ID(func, dctcp_init) @@ -249,25 +249,27 @@ BTF_ID(func, dctcp_cwnd_undo) BTF_ID(func, dctcp_state) #endif #endif -BTF_SET_END(tcp_dctcp_kfunc_ids) +BTF_SET_END(tcp_dctcp_check_kfunc_ids) -static DEFINE_KFUNC_BTF_ID_SET(&tcp_dctcp_kfunc_ids, tcp_dctcp_kfunc_btf_set); +static const struct btf_kfunc_id_set tcp_dctcp_kfunc_set = { + .owner = THIS_MODULE, + .check_set = &tcp_dctcp_check_kfunc_ids, +}; static int __init dctcp_register(void) { int ret; BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE); - ret = tcp_register_congestion_control(&dctcp); - if (ret) + + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &tcp_dctcp_kfunc_set); + if (ret < 0) return ret; - register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set); - return 0; + return tcp_register_congestion_control(&dctcp); } static void __exit dctcp_unregister(void) { - unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set); tcp_unregister_congestion_control(&dctcp); } diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index df3b292a8ffe..c0805d0d753f 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -109,26 +109,27 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .write = bpf_testmod_test_write, }; -BTF_SET_START(bpf_testmod_kfunc_ids) +BTF_SET_START(bpf_testmod_check_kfunc_ids) BTF_ID(func, bpf_testmod_test_mod_kfunc) -BTF_SET_END(bpf_testmod_kfunc_ids) +BTF_SET_END(bpf_testmod_check_kfunc_ids) -static DEFINE_KFUNC_BTF_ID_SET(&bpf_testmod_kfunc_ids, bpf_testmod_kfunc_btf_set); +static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { + .owner = THIS_MODULE, + .check_set = &bpf_testmod_check_kfunc_ids, +}; static int bpf_testmod_init(void) { int ret; - ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); - if (ret) + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); + if (ret < 0) return ret; - register_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set); - return 0; + return 
sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); } static void bpf_testmod_exit(void) { - unregister_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set); return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); } -- cgit v1.2.3-71-gd317 From d583691c47dc0424ebe926000339a6d6cd590ff7 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Fri, 14 Jan 2022 22:09:47 +0530 Subject: bpf: Introduce mem, size argument pair support for kfunc BPF helpers can associate two adjacent arguments together to pass memory of a certain size, using ARG_PTR_TO_MEM and ARG_CONST_SIZE arguments. Since we don't use bpf_func_proto for kfunc, we need to leverage BTF to implement similar support. The ARG_CONST_SIZE processing for helpers is refactored into a common check_mem_size_reg helper that is shared with kfunc as well. kfunc ptr_to_mem support follows logic similar to global functions, where verification is done as if the pointer is not null, even when it may be null. This leads to a simple-to-follow rule for writing kfunc: always check the argument pointer for NULL, except when it is PTR_TO_CTX. The PTR_TO_CTX case is also only safe when the helper expecting a pointer to the program ctx is not exposed to other programs where the same struct is not the ctx type. In that case, the type check will fall through to other cases and would permit passing other types of pointers, possibly NULL at runtime. Currently, we require the size argument to be suffixed with "__sz" in the parameter name. This information is then recorded in kernel BTF and verified during function argument checking. In the future we can use BTF tagging instead, and modify the kernel function definitions. This will be a purely kernel-side change. This allows us to have some form of backwards compatibility for structures that are passed in to the kernel function with their size, and allows variable-length structures to be passed in if they are accompanied by a size parameter.
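As an illustration, a kfunc taking such a mem, size pair would be declared along these lines (the function below is hypothetical; only the "__sz" suffix convention is from this patch):

	/* The verifier pairs 'mem' with 'mem__sz' because of the __sz
	 * suffix, and only accepts the call if the pointer in 'mem' is
	 * valid for at least 'mem__sz' bytes.
	 */
	void bpf_example_memfill(void *mem, u32 mem__sz);

A program passing a stack buffer together with a size that may exceed that buffer would then be rejected at load time.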
Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20220114163953.1455836-5-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 2 + kernel/bpf/btf.c | 48 +++++++++++++++-- kernel/bpf/verifier.c | 124 +++++++++++++++++++++++++++---------------- 3 files changed, 126 insertions(+), 48 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 143401d4c9d9..857fd687bdc2 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -521,6 +521,8 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); int check_ctx_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno); +int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, + u32 regno); int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno, u32 mem_size); diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 4be5cf629ca9..cf46694cb266 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5654,6 +5654,32 @@ static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log, return true; } +static bool is_kfunc_arg_mem_size(const struct btf *btf, + const struct btf_param *arg, + const struct bpf_reg_state *reg) +{ + int len, sfx_len = sizeof("__sz") - 1; + const struct btf_type *t; + const char *param_name; + + t = btf_type_skip_modifiers(btf, arg->type, NULL); + if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) + return false; + + /* In the future, this can be ported to use BTF tagging */ + param_name = btf_name_by_offset(btf, arg->name_off); + if (str_is_empty(param_name)) + return false; + len = strlen(param_name); + if (len < sfx_len) + return false; + param_name += len - sfx_len; + if (strncmp(param_name, "__sz", sfx_len)) + return false; + + return true; +} + static int btf_check_func_arg_match(struct bpf_verifier_env *env, const struct btf *btf, u32 func_id, struct bpf_reg_state *regs, @@ -5765,17 +5791,33 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, u32 type_size; if (is_kfunc) { + bool arg_mem_size = i + 1 < nargs && is_kfunc_arg_mem_size(btf, &args[i + 1], ®s[regno + 1]); + /* Permit pointer to mem, but only when argument * type is pointer to scalar, or struct composed * (recursively) of scalars. + * When arg_mem_size is true, the pointer can be + * void *. */ if (!btf_type_is_scalar(ref_t) && - !__btf_type_is_scalar_struct(log, btf, ref_t, 0)) { + !__btf_type_is_scalar_struct(log, btf, ref_t, 0) && + (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) { bpf_log(log, - "arg#%d pointer type %s %s must point to scalar or struct with scalar\n", - i, btf_type_str(ref_t), ref_tname); + "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n", + i, btf_type_str(ref_t), ref_tname, arg_mem_size ? 
"void, " : ""); return -EINVAL; } + + /* Check for mem, len pair */ + if (arg_mem_size) { + if (check_kfunc_mem_size_reg(env, ®s[regno + 1], regno + 1)) { + bpf_log(log, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", + i, i + 1); + return -EINVAL; + } + i++; + continue; + } } resolve_ret = btf_resolve_size(btf, ref_t, &type_size); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 72802c1eb5ac..2b186185b6b2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4864,6 +4864,62 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, } } +static int check_mem_size_reg(struct bpf_verifier_env *env, + struct bpf_reg_state *reg, u32 regno, + bool zero_size_allowed, + struct bpf_call_arg_meta *meta) +{ + int err; + + /* This is used to refine r0 return value bounds for helpers + * that enforce this value as an upper bound on return values. + * See do_refine_retval_range() for helpers that can refine + * the return value. C type of helper is u32 so we pull register + * bound from umax_value however, if negative verifier errors + * out. Only upper bounds can be learned because retval is an + * int type and negative retvals are allowed. + */ + if (meta) + meta->msize_max_value = reg->umax_value; + + /* The register is SCALAR_VALUE; the access check + * happens using its boundaries. + */ + if (!tnum_is_const(reg->var_off)) + /* For unprivileged variable accesses, disable raw + * mode so that the program is required to + * initialize all the memory that the helper could + * just partially fill up. + */ + meta = NULL; + + if (reg->smin_value < 0) { + verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", + regno); + return -EACCES; + } + + if (reg->umin_value == 0) { + err = check_helper_mem_access(env, regno - 1, 0, + zero_size_allowed, + meta); + if (err) + return err; + } + + if (reg->umax_value >= BPF_MAX_VAR_SIZ) { + verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", + regno); + return -EACCES; + } + err = check_helper_mem_access(env, regno - 1, + reg->umax_value, + zero_size_allowed, meta); + if (!err) + err = mark_chain_precision(env, regno); + return err; +} + int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno, u32 mem_size) { @@ -4887,6 +4943,28 @@ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, return check_helper_mem_access(env, regno, mem_size, true, NULL); } +int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, + u32 regno) +{ + struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; + bool may_be_null = type_may_be_null(mem_reg->type); + struct bpf_reg_state saved_reg; + int err; + + WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5); + + if (may_be_null) { + saved_reg = *mem_reg; + mark_ptr_not_null_reg(mem_reg); + } + + err = check_mem_size_reg(env, reg, regno, true, NULL); + + if (may_be_null) + *mem_reg = saved_reg; + return err; +} + /* Implementation details: * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL * Two bpf_map_lookups (even with the same key) will have different reg->id. @@ -5408,51 +5486,7 @@ skip_type_check: } else if (arg_type_is_mem_size(arg_type)) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); - /* This is used to refine r0 return value bounds for helpers - * that enforce this value as an upper bound on return values. - * See do_refine_retval_range() for helpers that can refine - * the return value. 
C type of helper is u32 so we pull register - bound from umax_value however, if negative verifier errors - out. Only upper bounds can be learned because retval is an - int type and negative retvals are allowed. - */ - meta->msize_max_value = reg->umax_value; - - /* The register is SCALAR_VALUE; the access check - * happens using its boundaries. - */ - if (!tnum_is_const(reg->var_off)) - /* For unprivileged variable accesses, disable raw - * mode so that the program is required to - * initialize all the memory that the helper could - * just partially fill up. - */ - meta = NULL; - - if (reg->smin_value < 0) { - verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", - regno); - return -EACCES; - } - - if (reg->umin_value == 0) { - err = check_helper_mem_access(env, regno - 1, 0, - zero_size_allowed, - meta); - if (err) - return err; - } - - if (reg->umax_value >= BPF_MAX_VAR_SIZ) { - verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", - regno); - return -EACCES; - } - err = check_helper_mem_access(env, regno - 1, - reg->umax_value, - zero_size_allowed, meta); - if (!err) - err = mark_chain_precision(env, regno); + err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta); } else if (arg_type_is_alloc_size(arg_type)) { if (!tnum_is_const(reg->var_off)) { verbose(env, "R%d is not a known constant'\n", -- cgit v1.2.3-71-gd317 From 5c073f26f9dc78a6c8194b23eac7537c9692c7d7 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Fri, 14 Jan 2022 22:09:48 +0530 Subject: bpf: Add reference tracking support to kfunc This patch adds verifier support for PTR_TO_BTF_ID return type of kfunc to be a reference, by reusing acquire_reference_state/release_reference support for existing in-kernel bpf helpers. We make use of the three kfunc types: - BTF_KFUNC_TYPE_ACQUIRE Return true if kfunc_btf_id is an acquire kfunc. This will acquire_reference_state for the returned PTR_TO_BTF_ID (this is the only allowed return value). Note that an acquire kfunc must always return a PTR_TO_BTF_ID{_OR_NULL}, otherwise the program is rejected. - BTF_KFUNC_TYPE_RELEASE Return true if kfunc_btf_id is a release kfunc. This will release the reference to the passed-in PTR_TO_BTF_ID, which has a reference state (from an earlier acquire kfunc). The btf_check_func_arg_match returns the regno (of the argument register, hence > 0) if the kfunc is a release kfunc and a proper referenced PTR_TO_BTF_ID is being passed to it. This is similar to how the helper call check uses bpf_call_arg_meta to store the ref_obj_id that is later used to release the reference. As with in-kernel helpers, we only allow passing one referenced PTR_TO_BTF_ID as an argument. It can also be passed in to a normal kfunc, but in the case of a release kfunc there must always be one PTR_TO_BTF_ID argument that is referenced. - BTF_KFUNC_TYPE_RET_NULL For a kfunc returning PTR_TO_BTF_ID, this tells if it can be NULL, hence forcing the caller to mark the pointer not null (using a check) before accessing it. Note that taking into account the case fixed by commit 93c230e3f5bd ("bpf: Enforce id generation for all may-be-null register type") we assign a non-zero id for mark_ptr_or_null_reg logic. Later, if more return types are supported by kfunc, which have a _OR_NULL variant, it might be better to move this id generation under a common reg_type_may_be_null check, similar to the case in the commit. Referenced PTR_TO_BTF_ID is currently limited to kfunc, but can be extended in the future to other BPF helpers as well.
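To make the three set types concrete, an acquire/release pair would be wired up roughly as below (a sketch; the 'foo' kfuncs and ID sets are made up, while the btf_kfunc_id_set fields come from the earlier patch in this series):

	/* struct foo *bpf_foo_acquire(...) returns a referenced pointer
	 * that may be NULL, so its BTF ID goes in both the acquire and
	 * ret_null sets; void bpf_foo_release(struct foo *p) releases
	 * that reference, so its BTF ID goes in the release set.
	 */
	static const struct btf_kfunc_id_set foo_kfunc_set = {
		.owner        = THIS_MODULE,
		.check_set    = &foo_check_ids,
		.acquire_set  = &foo_acquire_ids,
		.release_set  = &foo_release_ids,
		.ret_null_set = &foo_ret_null_ids,
	};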
For now, we can rely on the btf_struct_ids_match check to ensure we get the pointer to the expected struct type. In the future, care needs to be taken to avoid ambiguity for reference PTR_TO_BTF_ID passed to release function, in case multiple candidates can release same BTF ID. e.g. there might be two release kfuncs (or kfunc and helper): foo(struct abc *p); bar(struct abc *p); ... such that both release a PTR_TO_BTF_ID with btf_id of struct abc. In this case we would need to track the acquire function corresponding to the release function to avoid type confusion, and store this information in the register state so that an incorrect program can be rejected. This is not a problem right now, hence it is left as an exercise for the future patch introducing such a case in the kernel. Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20220114163953.1455836-6-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 5 +++++ kernel/bpf/btf.c | 32 +++++++++++++++++++++++++-- kernel/bpf/verifier.c | 52 +++++++++++++++++++++++++++++++++++--------- 3 files changed, 77 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 857fd687bdc2..ac4797155412 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -566,4 +566,9 @@ static inline u32 type_flag(u32 type) return type & ~BPF_BASE_TYPE_MASK; } +static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) +{ + return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type; +} + #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index cf46694cb266..57f5fd5af2f9 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5686,11 +5686,13 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, bool ptr_to_mem_ok) { struct bpf_verifier_log *log = &env->log; + u32 i, nargs, ref_id, ref_obj_id = 0; bool is_kfunc = btf_is_kernel(btf); const char *func_name, *ref_tname; const struct btf_type *t, *ref_t; const struct btf_param *args; - u32 i, nargs, ref_id; + int ref_regno = 0; + bool rel = false; t = btf_type_by_id(btf, func_id); if (!t || !btf_type_is_func(t)) { @@ -5768,6 +5770,16 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, if (reg->type == PTR_TO_BTF_ID) { reg_btf = reg->btf; reg_ref_id = reg->btf_id; + /* Ensure only one argument is referenced PTR_TO_BTF_ID */ + if (reg->ref_obj_id) { + if (ref_obj_id) { + bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", + regno, reg->ref_obj_id, ref_obj_id); + return -EFAULT; + } + ref_regno = regno; + ref_obj_id = reg->ref_obj_id; + } } else { reg_btf = btf_vmlinux; reg_ref_id = *reg2btf_ids[reg->type]; @@ -5838,7 +5850,23 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, } } - return 0; + /* Either both are set, or neither */ + WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno)); + if (is_kfunc) { + rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), + BTF_KFUNC_TYPE_RELEASE, func_id); + /* We already made sure ref_obj_id is set only for one argument */ + if (rel && !ref_obj_id) { + bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", + func_name); + return -EINVAL; + } + /* Allow (!rel && ref_obj_id), so that passing such referenced PTR_TO_BTF_ID to + * other kfuncs works + */ + } + /* returns argument register number > 0 in case of reference release kfunc */ + 
return rel ? ref_regno : 0; } /* Compare BTF of a function with given bpf_reg_state. diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 2b186185b6b2..8c5a46d41f28 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -452,7 +452,8 @@ static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) { return base_type(type) == PTR_TO_SOCKET || base_type(type) == PTR_TO_TCP_SOCK || - base_type(type) == PTR_TO_MEM; + base_type(type) == PTR_TO_MEM || + base_type(type) == PTR_TO_BTF_ID; } static bool type_is_rdonly_mem(u32 type) @@ -3493,11 +3494,6 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, #define MAX_PACKET_OFF 0xffff -static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) -{ - return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type; -} - static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) @@ -6845,15 +6841,17 @@ static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno, } } -static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) +static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + int *insn_idx_p) { const struct btf_type *t, *func, *func_proto, *ptr_type; struct bpf_reg_state *regs = cur_regs(env); const char *func_name, *ptr_type_name; u32 i, nargs, func_id, ptr_type_id; + int err, insn_idx = *insn_idx_p; const struct btf_param *args; struct btf *desc_btf; - int err; + bool acq; /* skip for now, but return error when we find this in fixup_kfunc_call */ if (!insn->imm) @@ -6875,16 +6873,36 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EACCES; } + acq = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), + BTF_KFUNC_TYPE_ACQUIRE, func_id); + /* Check the arguments */ err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs); - if (err) + if (err < 0) return err; + /* In case of release function, we get register number of refcounted + * PTR_TO_BTF_ID back from btf_check_kfunc_arg_match, do the release now + */ + if (err) { + err = release_reference(env, regs[err].ref_obj_id); + if (err) { + verbose(env, "kfunc %s#%d reference has not been acquired before\n", + func_name, func_id); + return err; + } + } for (i = 0; i < CALLER_SAVED_REGS; i++) mark_reg_not_init(env, regs, caller_saved[i]); /* Check return type */ t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL); + + if (acq && !btf_type_is_ptr(t)) { + verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); + return -EINVAL; + } + if (btf_type_is_scalar(t)) { mark_reg_unknown(env, regs, BPF_REG_0); mark_btf_func_reg_size(env, BPF_REG_0, t->size); @@ -6903,7 +6921,21 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) regs[BPF_REG_0].btf = desc_btf; regs[BPF_REG_0].type = PTR_TO_BTF_ID; regs[BPF_REG_0].btf_id = ptr_type_id; + if (btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), + BTF_KFUNC_TYPE_RET_NULL, func_id)) { + regs[BPF_REG_0].type |= PTR_MAYBE_NULL; + /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */ + regs[BPF_REG_0].id = ++env->id_gen; + } mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); + if (acq) { + int id = acquire_reference_state(env, insn_idx); + + if (id < 0) + return id; + regs[BPF_REG_0].id = id; + regs[BPF_REG_0].ref_obj_id = id; + } } /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */ nargs = btf_type_vlen(func_proto); @@ -11548,7 
+11580,7 @@ static int do_check(struct bpf_verifier_env *env) if (insn->src_reg == BPF_PSEUDO_CALL) err = check_func_call(env, insn, &env->insn_idx); else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) - err = check_kfunc_call(env, insn); + err = check_kfunc_call(env, insn, &env->insn_idx); else err = check_helper_call(env, insn, &env->insn_idx); if (err) -- cgit v1.2.3-71-gd317 From 75ab2b3633ccddd8f7bdf6c76f9ab3f9b2fc5d9d Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 28 Oct 2021 13:19:22 +0200 Subject: dma-buf: drop excl_fence parameter from dma_resv_get_fences MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Returning the exclusive fence separately is no longer used. Instead add a write parameter to indicate the use case. Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211207123411.167006-4-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 46 ++++++++++------------------ drivers/dma-buf/st-dma-resv.c | 26 +++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 3 +- include/linux/dma-resv.h | 4 +-- 6 files changed, 30 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 4deea75c0b9c..6dd9a40b55d4 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -542,57 +542,45 @@ EXPORT_SYMBOL(dma_resv_copy_fences); * dma_resv_get_fences - Get an object's shared and exclusive * fences without update side lock held * @obj: the reservation object - * @fence_excl: the returned exclusive fence (or NULL) - * @shared_count: the number of shared fences returned - * @shared: the array of shared fence ptrs returned (array is krealloc'd to - * the required size, and must be freed by caller) + * @write: true if we should return all fences + * @num_fences: the number of fences returned + * @fences: the array of fence ptrs returned (array is krealloc'd to the + * required size, and must be freed by caller) * - * Retrieve all fences from the reservation object. If the pointer for the - * exclusive fence is not specified the fence is put into the array of the - * shared fences as well. Returns either zero or -ENOMEM. + * Retrieve all fences from the reservation object. + * Returns either zero or -ENOMEM. */ -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl, - unsigned int *shared_count, struct dma_fence ***shared) +int dma_resv_get_fences(struct dma_resv *obj, bool write, + unsigned int *num_fences, struct dma_fence ***fences) { struct dma_resv_iter cursor; struct dma_fence *fence; - *shared_count = 0; - *shared = NULL; + *num_fences = 0; + *fences = NULL; - if (fence_excl) - *fence_excl = NULL; - - dma_resv_iter_begin(&cursor, obj, true); + dma_resv_iter_begin(&cursor, obj, write); dma_resv_for_each_fence_unlocked(&cursor, fence) { if (dma_resv_iter_is_restarted(&cursor)) { unsigned int count; - while (*shared_count) - dma_fence_put((*shared)[--(*shared_count)]); - - if (fence_excl) - dma_fence_put(*fence_excl); + while (*num_fences) + dma_fence_put((*fences)[--(*num_fences)]); - count = cursor.shared_count; - count += fence_excl ? 
0 : 1; + count = cursor.shared_count + 1; /* Eventually re-allocate the array */ - *shared = krealloc_array(*shared, count, + *fences = krealloc_array(*fences, count, sizeof(void *), GFP_KERNEL); - if (count && !*shared) { + if (count && !*fences) { dma_resv_iter_end(&cursor); return -ENOMEM; } } - dma_fence_get(fence); - if (dma_resv_iter_is_exclusive(&cursor) && fence_excl) - *fence_excl = fence; - else - (*shared)[(*shared_count)++] = fence; + (*fences)[(*num_fences)++] = dma_fence_get(fence); } dma_resv_iter_end(&cursor); diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index bc32b3eedcb6..cbe999c6e7a6 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -275,7 +275,7 @@ static int test_shared_for_each_unlocked(void *arg) static int test_get_fences(void *arg, bool shared) { - struct dma_fence *f, *excl = NULL, **fences = NULL; + struct dma_fence *f, **fences = NULL; struct dma_resv resv; int r, i; @@ -304,35 +304,19 @@ static int test_get_fences(void *arg, bool shared) } dma_resv_unlock(&resv); - r = dma_resv_get_fences(&resv, &excl, &i, &fences); + r = dma_resv_get_fences(&resv, shared, &i, &fences); if (r) { pr_err("get_fences failed\n"); goto err_free; } - if (shared) { - if (excl != NULL) { - pr_err("get_fences returned unexpected excl fence\n"); - goto err_free; - } - if (i != 1 || fences[0] != f) { - pr_err("get_fences returned unexpected shared fence\n"); - goto err_free; - } - } else { - if (excl != f) { - pr_err("get_fences returned unexpected excl fence\n"); - goto err_free; - } - if (i != 0) { - pr_err("get_fences returned unexpected shared fence\n"); - goto err_free; - } + if (i != 1 || fences[0] != f) { + pr_err("get_fences returned unexpected fence\n"); + goto err_free; } dma_fence_signal(f); err_free: - dma_fence_put(excl); while (i--) dma_fence_put(fences[i]); kfree(fences); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 18cc7155e667..9a273420a67a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -200,8 +200,10 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL, - &work->shared_count, &work->shared); + /* TODO: Unify this with other drivers */ + r = dma_resv_get_fences(new_abo->tbo.base.resv, true, + &work->shared_count, + &work->shared); if (unlikely(r != 0)) { DRM_ERROR("failed to get fences for buffer\n"); goto unpin; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index b7fb72bff2c1..be48487e2ca7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, unsigned count; int r; - r = dma_resv_get_fences(resv, NULL, &count, &fences); + r = dma_resv_get_fences(resv, true, &count, &fences); if (r) goto fallback; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index b5e8ce86dbe7..64c90ff348f2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -189,8 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) continue; if (bo->flags & ETNA_SUBMIT_BO_WRITE) { - ret = dma_resv_get_fences(robj, NULL, - &bo->nr_shared, + ret = dma_resv_get_fences(robj, true, &bo->nr_shared, &bo->shared); if (ret) return ret; diff --git 
a/include/linux/dma-resv.h b/include/linux/dma-resv.h index eebf04325b34..a715df97b31a 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -458,8 +458,8 @@ void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, - unsigned *pshared_count, struct dma_fence ***pshared); +int dma_resv_get_fences(struct dma_resv *obj, bool write, + unsigned int *num_fences, struct dma_fence ***fences); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, unsigned long timeout); -- cgit v1.2.3-71-gd317
From f10d059661968b01ef61a8b516775f95a18ab8ae Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 16 Dec 2021 02:04:25 +0000 Subject: bpf: Make BPF_PROG_RUN_ARRAY return -err instead of allow boolean
Right now BPF_PROG_RUN_ARRAY and related macros return 1 or 0 for whether the prog array allows or rejects whatever is being hooked. The callers of these macros then return -EPERM or continue processing based on the macro's return value. Unfortunately this is inflexible, since -EPERM is the only err that can be returned.
This patch should be a no-op; it prepares for the next patch. The return of -EPERM is moved inside the macros, so the outer functions now directly return whatever non-zero value the macros produce.
Signed-off-by: YiFei Zhu Reviewed-by: Stanislav Fomichev Link: https://lore.kernel.org/r/788abcdca55886d1f43274c918eaa9f792a9f33b.1639619851.git.zhuyifei@google.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 16 +++++++++------- kernel/bpf/cgroup.c | 41 +++++++++++++++-------------------------- security/device_cgroup.c | 2 +- 3 files changed, 25 insertions(+), 34 deletions(-) (limited to 'include/linux')
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6d7346c54d83..83da1764fcfe 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1277,7 +1277,7 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); -static __always_inline u32 +static __always_inline int BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, const void *ctx, bpf_prog_run_fn run_prog, u32 *ret_flags) @@ -1287,7 +1287,7 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx; struct bpf_cg_run_ctx run_ctx; - u32 ret = 1; + int ret = 0; u32 func_ret; migrate_disable(); @@ -1298,7 +1298,8 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; func_ret = run_prog(prog, ctx); - ret &= (func_ret & 1); + if (!(func_ret & 1)) + ret = -EPERM; *(ret_flags) |= (func_ret >> 1); item++; } @@ -1308,7 +1309,7 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, return ret; } -static __always_inline u32 +static __always_inline int BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, const void *ctx, bpf_prog_run_fn run_prog) { @@ -1317,7 +1318,7 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx;
struct bpf_cg_run_ctx run_ctx; - u32 ret = 1; + int ret = 0; migrate_disable(); rcu_read_lock(); @@ -1326,7 +1327,8 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; - ret &= run_prog(prog, ctx); + if (!run_prog(prog, ctx)) + ret = -EPERM; item++; } bpf_reset_run_ctx(old_run_ctx); @@ -1394,7 +1396,7 @@ out: u32 _ret; \ _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \ _cn = _flags & BPF_RET_SET_CN; \ - if (_ret) \ + if (!_ret) \ _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ else \ _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 514b4681a90a..386155d279b3 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1080,7 +1080,6 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, } else { ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb); - ret = (ret == 1 ? 0 : -EPERM); } bpf_restore_data_end(skb, saved_data_end); __skb_pull(skb, offset); @@ -1107,10 +1106,9 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk, enum cgroup_bpf_attach_type atype) { struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); - int ret; - ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run); - return ret == 1 ? 0 : -EPERM; + return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, + bpf_prog_run); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk); @@ -1142,7 +1140,6 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, }; struct sockaddr_storage unspec; struct cgroup *cgrp; - int ret; /* Check socket family since not all sockets represent network * endpoint (e.g. AF_UNIX). @@ -1156,10 +1153,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, } cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); - ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx, - bpf_prog_run, flags); - - return ret == 1 ? 0 : -EPERM; + return BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx, + bpf_prog_run, flags); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr); @@ -1184,11 +1179,9 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, enum cgroup_bpf_attach_type atype) { struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); - int ret; - ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops, - bpf_prog_run); - return ret == 1 ? 0 : -EPERM; + return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops, + bpf_prog_run); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops); @@ -1201,15 +1194,15 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, .major = major, .minor = minor, }; - int allow; + int ret; rcu_read_lock(); cgrp = task_dfl_cgroup(current); - allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, - bpf_prog_run); + ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, + bpf_prog_run); rcu_read_unlock(); - return !allow; + return ret; } static const struct bpf_func_proto * @@ -1350,7 +1343,7 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, kfree(ctx.new_val); } - return ret == 1 ? 
0 : -EPERM; + return ret; } #ifdef CONFIG_NET @@ -1455,10 +1448,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, &ctx, bpf_prog_run); release_sock(sk); - if (!ret) { - ret = -EPERM; + if (ret) goto out; - } if (ctx.optlen == -1) { /* optlen set to -1, bypass kernel */ @@ -1565,10 +1556,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, &ctx, bpf_prog_run); release_sock(sk); - if (!ret) { - ret = -EPERM; + if (ret) goto out; - } if (ctx.optlen > max_optlen || ctx.optlen < 0) { ret = -EFAULT; @@ -1624,8 +1613,8 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT], &ctx, bpf_prog_run); - if (!ret) - return -EPERM; + if (ret) + return ret; if (ctx.optlen > *optlen) return -EFAULT; diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 842889f3dcb7..a9f8c63a96d1 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -838,7 +838,7 @@ int devcgroup_check_permission(short type, u32 major, u32 minor, short access) int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access); if (rc) - return -EPERM; + return rc; #ifdef CONFIG_CGROUP_DEVICE return devcgroup_legacy_check_permission(type, major, minor, access); -- cgit v1.2.3-71-gd317 From c4dcfdd406aa2167396ac215e351e5e4dfd7efe3 Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 16 Dec 2021 02:04:26 +0000 Subject: bpf: Move getsockopt retval to struct bpf_cg_run_ctx The retval value is moved to struct bpf_cg_run_ctx for ease of access in different prog types with different context structs layouts. The helper implementation (to be added in a later patch in the series) can simply perform a container_of from current->bpf_ctx to retrieve bpf_cg_run_ctx. Unfortunately, there is no easy way to access the current task_struct via the verifier BPF bytecode rewrite, aside from possibly calling a helper, so a pointer to current task is added to struct bpf_sockopt_kern so that the rewritten BPF bytecode can access struct bpf_cg_run_ctx with an indirection. For backward compatibility, if a getsockopt program rejects a syscall by returning 0, an -EPERM will be generated, by having the BPF_PROG_RUN_ARRAY_CG family macros automatically set the retval to -EPERM. Unlike prior to this patch, this -EPERM will be visible to ctx->retval for any other hooks down the line in the prog array. Additionally, the restriction that getsockopt filters can only set the retval to 0 is removed, considering that certain getsockopt implementations may return optlen. Filters are now able to set the value arbitrarily. 
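As a hedged illustration of what this enables from the BPF side (the section name, program body and SO_SNDBUF check are assumptions made for this sketch, not part of the patch):

SEC("cgroup/getsockopt")
int getsockopt_audit(struct bpf_sockopt *ctx)
{
	/* ctx->retval is now backed by bpf_cg_run_ctx, so a value
	 * written here is visible to later programs in the array and
	 * ultimately becomes the value userspace sees.
	 */
	if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF)
		ctx->retval = 0;	/* accept the kernel's result */
	return 1;			/* 1 == allow */
}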
Signed-off-by: YiFei Zhu Reviewed-by: Stanislav Fomichev Link: https://lore.kernel.org/r/73b0325f5c29912ccea7ea57ec1ed4d388fc1d37.1639619851.git.zhuyifei@google.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 20 ++++++------ include/linux/filter.h | 5 ++- kernel/bpf/cgroup.c | 82 +++++++++++++++++++++++++++++--------------------- 3 files changed, 63 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 83da1764fcfe..7b0c11f414d0 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1245,6 +1245,7 @@ struct bpf_run_ctx {}; struct bpf_cg_run_ctx { struct bpf_run_ctx run_ctx; const struct bpf_prog_array_item *prog_item; + int retval; }; struct bpf_trace_run_ctx { @@ -1280,16 +1281,16 @@ typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); static __always_inline int BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, const void *ctx, bpf_prog_run_fn run_prog, - u32 *ret_flags) + int retval, u32 *ret_flags) { const struct bpf_prog_array_item *item; const struct bpf_prog *prog; const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx; struct bpf_cg_run_ctx run_ctx; - int ret = 0; u32 func_ret; + run_ctx.retval = retval; migrate_disable(); rcu_read_lock(); array = rcu_dereference(array_rcu); @@ -1299,27 +1300,28 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, run_ctx.prog_item = item; func_ret = run_prog(prog, ctx); if (!(func_ret & 1)) - ret = -EPERM; + run_ctx.retval = -EPERM; *(ret_flags) |= (func_ret >> 1); item++; } bpf_reset_run_ctx(old_run_ctx); rcu_read_unlock(); migrate_enable(); - return ret; + return run_ctx.retval; } static __always_inline int BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, - const void *ctx, bpf_prog_run_fn run_prog) + const void *ctx, bpf_prog_run_fn run_prog, + int retval) { const struct bpf_prog_array_item *item; const struct bpf_prog *prog; const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx; struct bpf_cg_run_ctx run_ctx; - int ret = 0; + run_ctx.retval = retval; migrate_disable(); rcu_read_lock(); array = rcu_dereference(array_rcu); @@ -1328,13 +1330,13 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; if (!run_prog(prog, ctx)) - ret = -EPERM; + run_ctx.retval = -EPERM; item++; } bpf_reset_run_ctx(old_run_ctx); rcu_read_unlock(); migrate_enable(); - return ret; + return run_ctx.retval; } static __always_inline u32 @@ -1394,7 +1396,7 @@ out: u32 _flags = 0; \ bool _cn; \ u32 _ret; \ - _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \ + _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \ _cn = _flags & BPF_RET_SET_CN; \ if (!_ret) \ _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ diff --git a/include/linux/filter.h b/include/linux/filter.h index 71fa57b88bfc..d23e999dc032 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1356,7 +1356,10 @@ struct bpf_sockopt_kern { s32 level; s32 optname; s32 optlen; - s32 retval; + /* for retval in struct bpf_cg_run_ctx */ + struct task_struct *current_task; + /* Temporary "register" for indirect stores to ppos. 
*/ + u64 tmp_reg; }; int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len); diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 386155d279b3..b6fad0bbf5a7 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1079,7 +1079,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb); } else { ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb, - __bpf_prog_run_save_cb); + __bpf_prog_run_save_cb, 0); } bpf_restore_data_end(skb, saved_data_end); __skb_pull(skb, offset); @@ -1108,7 +1108,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk, struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, - bpf_prog_run); + bpf_prog_run, 0); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk); @@ -1154,7 +1154,7 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); return BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx, - bpf_prog_run, flags); + bpf_prog_run, 0, flags); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr); @@ -1181,7 +1181,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops, - bpf_prog_run); + bpf_prog_run, 0); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops); @@ -1199,7 +1199,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, rcu_read_lock(); cgrp = task_dfl_cgroup(current); ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, - bpf_prog_run); + bpf_prog_run, 0); rcu_read_unlock(); return ret; @@ -1330,7 +1330,8 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, rcu_read_lock(); cgrp = task_dfl_cgroup(current); - ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, bpf_prog_run); + ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, + bpf_prog_run, 0); rcu_read_unlock(); kfree(ctx.cur_val); @@ -1445,7 +1446,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, lock_sock(sk); ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT], - &ctx, bpf_prog_run); + &ctx, bpf_prog_run, 0); release_sock(sk); if (ret) @@ -1509,7 +1510,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, .sk = sk, .level = level, .optname = optname, - .retval = retval, + .current_task = current, }; int ret; @@ -1553,10 +1554,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, lock_sock(sk); ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT], - &ctx, bpf_prog_run); + &ctx, bpf_prog_run, retval); release_sock(sk); - if (ret) + if (ret < 0) goto out; if (ctx.optlen > max_optlen || ctx.optlen < 0) { @@ -1564,14 +1565,6 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, goto out; } - /* BPF programs only allowed to set retval to 0, not some - * arbitrary value. 
- */ - if (ctx.retval != 0 && ctx.retval != retval) { - ret = -EFAULT; - goto out; - } - if (ctx.optlen != 0) { if (copy_to_user(optval, ctx.optval, ctx.optlen) || put_user(ctx.optlen, optlen)) { @@ -1580,8 +1573,6 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, } } - ret = ctx.retval; - out: sockopt_free_buf(&ctx, &buf); return ret; @@ -1596,10 +1587,10 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, .sk = sk, .level = level, .optname = optname, - .retval = retval, .optlen = *optlen, .optval = optval, .optval_end = optval + *optlen, + .current_task = current, }; int ret; @@ -1612,25 +1603,19 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, */ ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT], - &ctx, bpf_prog_run); - if (ret) + &ctx, bpf_prog_run, retval); + if (ret < 0) return ret; if (ctx.optlen > *optlen) return -EFAULT; - /* BPF programs only allowed to set retval to 0, not some - * arbitrary value. - */ - if (ctx.retval != 0 && ctx.retval != retval) - return -EFAULT; - /* BPF programs can shrink the buffer, export the modifications. */ if (ctx.optlen != 0) *optlen = ctx.optlen; - return ctx.retval; + return ret; } #endif @@ -2046,10 +2031,39 @@ static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type, *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen); break; case offsetof(struct bpf_sockopt, retval): - if (type == BPF_WRITE) - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval); - else - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval); + BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0); + + if (type == BPF_WRITE) { + int treg = BPF_REG_9; + + if (si->src_reg == treg || si->dst_reg == treg) + --treg; + if (si->src_reg == treg || si->dst_reg == treg) + --treg; + *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg, + offsetof(struct bpf_sockopt_kern, tmp_reg)); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task), + treg, si->dst_reg, + offsetof(struct bpf_sockopt_kern, current_task)); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx), + treg, treg, + offsetof(struct task_struct, bpf_ctx)); + *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval), + treg, si->src_reg, + offsetof(struct bpf_cg_run_ctx, retval)); + *insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg, + offsetof(struct bpf_sockopt_kern, tmp_reg)); + } else { + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sockopt_kern, current_task)); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx), + si->dst_reg, si->dst_reg, + offsetof(struct task_struct, bpf_ctx)); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval), + si->dst_reg, si->dst_reg, + offsetof(struct bpf_cg_run_ctx, retval)); + } break; case offsetof(struct bpf_sockopt, optval): *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval); -- cgit v1.2.3-71-gd317 From b44123b4a3dcad4664d3a0f72c011ffd4c9c4d93 Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 16 Dec 2021 02:04:27 +0000 Subject: bpf: Add cgroup helpers bpf_{get,set}_retval to get/set syscall return value The helpers continue to use int for retval because all the hooks are int-returning rather than long-returning. The return value of bpf_set_retval is int for future-proofing, in case in the future there may be errors trying to set the retval. 
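A hedged sketch of the intended usage (the section name and program body are illustrative assumptions; the interaction with the default -EPERM is described below):

SEC("cgroup/setsockopt")
int reject_with_einval(struct bpf_sockopt *ctx)
{
	/* Reject the syscall, but make userspace see -EINVAL
	 * instead of the default -EPERM.
	 */
	bpf_set_retval(-EINVAL);
	return 0;	/* 0 == reject */
}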
After the previous patch, if a program rejects a syscall by returning 0, an -EPERM will be generated no matter whether the retval is already set to -err. This patch changes that: -EPERM is forced only if the retval is not already -err. This is because we want to support, for example, invoking bpf_set_retval(-EINVAL) and returning 0, and have the syscall return value be -EINVAL not -EPERM.
For BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY, the prior behavior is that, if the return value is NET_XMIT_DROP, the packet is silently dropped. We preserve this behavior for backward compatibility reasons, so even if an errno is set, the errno does not reach the caller. However, setting a non-error value in retval cannot be propagated, so this is not allowed and we return -EFAULT in that case.
Signed-off-by: YiFei Zhu Reviewed-by: Stanislav Fomichev Link: https://lore.kernel.org/r/b4013fd5d16bed0b01977c1fafdeae12e1de61fb.1639619851.git.zhuyifei@google.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 10 ++++++---- include/uapi/linux/bpf.h | 18 ++++++++++++++++++ kernel/bpf/cgroup.c | 38 +++++++++++++++++++++++++++++++++++++- tools/include/uapi/linux/bpf.h | 18 ++++++++++++++++++ 4 files changed, 79 insertions(+), 5 deletions(-) (limited to 'include/linux')
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 7b0c11f414d0..dce54eb0aae8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1299,7 +1299,7 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; func_ret = run_prog(prog, ctx); - if (!(func_ret & 1)) + if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval)) run_ctx.retval = -EPERM; *(ret_flags) |= (func_ret >> 1); item++; @@ -1329,7 +1329,7 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; - if (!run_prog(prog, ctx)) + if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval)) run_ctx.retval = -EPERM; item++; } @@ -1389,7 +1389,7 @@ out: * 0: NET_XMIT_SUCCESS skb should be transmitted * 1: NET_XMIT_DROP skb should be dropped and cn * 2: NET_XMIT_CN skb should be transmitted and cn - * 3: -EPERM skb should be dropped + * 3: -err skb should be dropped */ #define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ ({ \ @@ -1398,10 +1398,12 @@ out: u32 _ret; \ _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \ _cn = _flags & BPF_RET_SET_CN; \ + if (_ret && !IS_ERR_VALUE((long)_ret)) \ + _ret = -EFAULT; \ if (!_ret) \ _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ else \ - _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ + _ret = (_cn ? NET_XMIT_DROP : _ret); \ _ret; \ })
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a9c96c21330a..fe2272defcd9 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5033,6 +5033,22 @@ union bpf_attr { * * Return * The number of arguments of the traced function. + * + * int bpf_get_retval(void) + * Description + * Get the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * The syscall's return value. + * + * int bpf_set_retval(int retval) + * Description + * Set the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * 0 on success, or a negative error in case of failure.
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5221,6 +5237,8 @@ union bpf_attr { FN(get_func_arg), \ FN(get_func_ret), \ FN(get_func_arg_cnt), \ + FN(get_retval), \ + FN(set_retval), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index b6fad0bbf5a7..279ebbed75a5 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1044,7 +1044,7 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr * NET_XMIT_CN (2) - continue with packet output and notify TCP * to call cwr - * -EPERM - drop packet + * -err - drop packet * * For ingress packets, this function will return -EPERM if any * attached program was found and if it returned != 1 during execution. @@ -1080,6 +1080,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, } else { ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb, 0); + if (ret && !IS_ERR_VALUE((long)ret)) + ret = -EFAULT; } bpf_restore_data_end(skb, saved_data_end); __skb_pull(skb, offset); @@ -1205,6 +1207,36 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, return ret; } +BPF_CALL_0(bpf_get_retval) +{ + struct bpf_cg_run_ctx *ctx = + container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx); + + return ctx->retval; +} + +static const struct bpf_func_proto bpf_get_retval_proto = { + .func = bpf_get_retval, + .gpl_only = false, + .ret_type = RET_INTEGER, +}; + +BPF_CALL_1(bpf_set_retval, int, retval) +{ + struct bpf_cg_run_ctx *ctx = + container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx); + + ctx->retval = retval; + return 0; +} + +static const struct bpf_func_proto bpf_set_retval_proto = { + .func = bpf_set_retval, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto * cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -1217,6 +1249,10 @@ cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_current_cgroup_id_proto; case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; + case BPF_FUNC_get_retval: + return &bpf_get_retval_proto; + case BPF_FUNC_set_retval: + return &bpf_set_retval_proto; default: return bpf_base_func_proto(func_id); } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index a9c96c21330a..fe2272defcd9 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5033,6 +5033,22 @@ union bpf_attr { * * Return * The number of arguments of the traced function. + * + * int bpf_get_retval(void) + * Description + * Get the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * The syscall's return value. + * + * int bpf_set_retval(int retval) + * Description + * Set the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * 0 on success, or a negative error in case of failure. 
#define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5221,6 +5237,8 @@ union bpf_attr { FN(get_func_arg), \ FN(get_func_ret), \ FN(get_func_arg_cnt), \ + FN(get_retval), \ + FN(set_retval), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-71-gd317
From 47934e06b65637c88a762d9c98329ae6e3238888 Mon Sep 17 00:00:00 2001 From: Congyu Liu Date: Tue, 18 Jan 2022 14:20:13 -0500 Subject: net: fix information leakage in /proc/net/ptype
In one net namespace, after creating a packet socket without binding it to a device, users in other net namespaces can observe the new `packet_type` added by this packet socket by reading the `/proc/net/ptype` file. This is a minor information leak, as packet sockets are namespace aware.
Add a net pointer in `packet_type` to keep the net namespace of the corresponding packet socket. In `ptype_seq_show`, this net pointer must be checked when it is not NULL.
Fixes: 2feb27dbe00c ("[NETNS]: Minor information leak via /proc/net/ptype file.") Signed-off-by: Congyu Liu Signed-off-by: David S. Miller --- include/linux/netdevice.h | 1 + net/core/net-procfs.c | 3 ++- net/packet/af_packet.c | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux')
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3213c7227b59..e490b84732d1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2548,6 +2548,7 @@ struct packet_type { struct net_device *); bool (*id_match)(struct packet_type *ptype, struct sock *sk); + struct net *af_packet_net; void *af_packet_priv; struct list_head list; };
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index d8b9dbabd4a4..5b8016335aca 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -260,7 +260,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) seq_puts(seq, "Type Device Function\n"); - else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { + else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && + (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) { if (pt->type == htons(ETH_P_ALL)) seq_puts(seq, "ALL "); else
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5bd409ab4cc2..85ea7ddb48db 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1774,6 +1774,7 @@ static int fanout_add(struct sock *sk, struct fanout_args *args) match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; + match->prot_hook.af_packet_net = read_pnet(&match->net); match->prot_hook.id_match = match_fanout_group; match->max_num_members = args->max_num_members; list_add(&match->list, &fanout_list); @@ -3353,6 +3354,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, po->prot_hook.func = packet_rcv_spkt; po->prot_hook.af_packet_priv = sk; + po->prot_hook.af_packet_net = sock_net(sk); if (proto) { po->prot_hook.type = proto; -- cgit v1.2.3-71-gd317
From e2f08207c558bc0bc8abaa557cdb29bad776ac7b Mon Sep 17 00:00:00 2001 From: Moshe Tal Date: Thu, 20 Jan 2022 11:55:50 +0200 Subject: ethtool: Fix link extended state for big endian
The link extended sub-states are assigned as enums, which are integer sized, but are read from a union as u8. This works for small values on little endian systems, but on big endian systems it always gives 0. Fix the variable in the union to match the enum size.
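A small user-space sketch of the endianness hazard being fixed (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

union substate {
	int as_enum;	/* enum members are int-sized */
	uint8_t as_u8;	/* the old, too-narrow field */
};

int main(void)
{
	union substate s = { .as_enum = 2 };
	/* On little endian the low-order byte overlaps as_u8, so this
	 * prints 2; on big endian as_u8 aliases the high-order byte,
	 * so small enum values read back as 0.
	 */
	printf("%u\n", s.as_u8);
	return 0;
}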
Fixes: ecc31c60240b ("ethtool: Add link extended state") Signed-off-by: Moshe Tal Reviewed-by: Ido Schimmel Tested-by: Ido Schimmel Reviewed-by: Gal Pressman Reviewed-by: Amit Cohen Signed-off-by: David S. Miller --- include/linux/ethtool.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index a26f37a27167..11efc45de66a 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -111,7 +111,7 @@ struct ethtool_link_ext_state_info { enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; enum ethtool_link_ext_substate_cable_issue cable_issue; enum ethtool_link_ext_substate_module module; - u8 __link_ext_substate; + u32 __link_ext_substate; }; }; -- cgit v1.2.3-71-gd317 From 5298d4bfe80f6ae6ae2777bcd1357b0022d98573 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 18 Jan 2022 07:56:14 +0100 Subject: unicode: clean up the Kconfig symbol confusion Turn the CONFIG_UNICODE symbol into a tristate that generates some always built in code and remove the confusing CONFIG_UNICODE_UTF8_DATA symbol. Note that a lot of the IS_ENABLED() checks could be turned from cpp statements into normal ifs, but this change is intended to be fairly mechanic, so that should be cleaned up later. Fixes: 2b3d04787012 ("unicode: Add utf8-data module") Reported-by: Linus Torvalds Reviewed-by: Eric Biggers Signed-off-by: Christoph Hellwig Signed-off-by: Gabriel Krisman Bertazi --- fs/Makefile | 2 +- fs/ext4/ext4.h | 14 +++++++------- fs/ext4/hash.c | 2 +- fs/ext4/namei.c | 12 ++++++------ fs/ext4/super.c | 10 +++++----- fs/ext4/sysfs.c | 8 ++++---- fs/f2fs/dir.c | 10 +++++----- fs/f2fs/f2fs.h | 2 +- fs/f2fs/hash.c | 2 +- fs/f2fs/namei.c | 4 ++-- fs/f2fs/recovery.c | 4 ++-- fs/f2fs/super.c | 10 +++++----- fs/f2fs/sysfs.c | 10 +++++----- fs/libfs.c | 10 +++++----- fs/unicode/Kconfig | 18 +++++------------- fs/unicode/Makefile | 6 ++++-- include/linux/fs.h | 2 +- 17 files changed, 60 insertions(+), 66 deletions(-) (limited to 'include/linux') diff --git a/fs/Makefile b/fs/Makefile index 84c5e4cdfee5..c71ee0127866 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -94,7 +94,7 @@ obj-$(CONFIG_EXPORTFS) += exportfs/ obj-$(CONFIG_NFSD) += nfsd/ obj-$(CONFIG_LOCKD) += lockd/ obj-$(CONFIG_NLS) += nls/ -obj-$(CONFIG_UNICODE) += unicode/ +obj-y += unicode/ obj-$(CONFIG_SYSV_FS) += sysv/ obj-$(CONFIG_SMBFS_COMMON) += smbfs_common/ obj-$(CONFIG_CIFS) += cifs/ diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 71a3cdceaa03..242e74cfb060 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2485,7 +2485,7 @@ struct ext4_filename { #ifdef CONFIG_FS_ENCRYPTION struct fscrypt_str crypto_buf; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) struct fscrypt_str cf_name; #endif }; @@ -2721,7 +2721,7 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb, struct ext4_group_desc *gdp); ext4_fsblk_t ext4_inode_to_goal_block(struct inode *); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) extern int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname, struct ext4_filename *fname); @@ -2754,7 +2754,7 @@ static inline int ext4_fname_setup_filename(struct inode *dir, ext4_fname_from_fscrypt_name(fname, &name); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) err = ext4_fname_setup_ci_filename(dir, iname, fname); #endif return err; @@ -2773,7 +2773,7 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir, ext4_fname_from_fscrypt_name(fname, &name); 
-#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname); #endif return err; @@ -2790,7 +2790,7 @@ static inline void ext4_fname_free_filename(struct ext4_filename *fname) fname->usr_fname = NULL; fname->disk_name.name = NULL; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) kfree(fname->cf_name.name); fname->cf_name.name = NULL; #endif @@ -2806,7 +2806,7 @@ static inline int ext4_fname_setup_filename(struct inode *dir, fname->disk_name.name = (unsigned char *) iname->name; fname->disk_name.len = iname->len; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) err = ext4_fname_setup_ci_filename(dir, iname, fname); #endif @@ -2822,7 +2822,7 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir, static inline void ext4_fname_free_filename(struct ext4_filename *fname) { -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) kfree(fname->cf_name.name); fname->cf_name.name = NULL; #endif diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c index f34f4176c1e7..147b5241dd94 100644 --- a/fs/ext4/hash.c +++ b/fs/ext4/hash.c @@ -290,7 +290,7 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len, int ext4fs_dirhash(const struct inode *dir, const char *name, int len, struct dx_hash_info *hinfo) { -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) const struct unicode_map *um = dir->i_sb->s_encoding; int r, dlen; unsigned char *buff; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 52c9bd154122..269d2d051ede 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1317,7 +1317,7 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block) dx_set_count(entries, count + 1); } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* * Test whether a case-insensitive directory entry matches the filename * being searched for. If quick is set, assume the name being looked up @@ -1428,7 +1428,7 @@ static bool ext4_match(struct inode *parent, f.crypto_buf = fname->crypto_buf; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) && (!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) { if (fname->cf_name.name) { @@ -1800,7 +1800,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi } } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (!inode && IS_CASEFOLDED(dir)) { /* Eventually we want to call d_add_ci(dentry, NULL) * for negative dentries in the encoding case as @@ -2308,7 +2308,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, if (fscrypt_is_nokey_name(dentry)) return -ENOKEY; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (sb_has_strict_encoding(sb) && IS_CASEFOLDED(dir) && sb->s_encoding && utf8_validate(sb->s_encoding, &dentry->d_name)) return -EINVAL; @@ -3126,7 +3126,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) ext4_fc_track_unlink(handle, dentry); retval = ext4_mark_inode_dirty(handle, dir); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. 
Eventually we'll want avoid * invalidating the dentries here, alongside with returning the @@ -3231,7 +3231,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry)); if (!retval) ext4_fc_track_unlink(handle, dentry); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. Eventually we'll want avoid * invalidating the dentries here, alongside with returning the diff --git a/fs/ext4/super.c b/fs/ext4/super.c index db9fe4843529..52be1ca38eef 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1302,7 +1302,7 @@ static void ext4_put_super(struct super_block *sb) kfree(sbi->s_blockgroup_lock); fs_put_dax(sbi->s_daxdev); fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); #endif kfree(sbi); @@ -1962,7 +1962,7 @@ static const struct mount_opts { {Opt_err, 0, 0} }; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) static const struct ext4_sb_encodings { __u16 magic; char *name; @@ -3609,7 +3609,7 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly) return 0; } -#ifndef CONFIG_UNICODE +#if !IS_ENABLED(CONFIG_UNICODE) if (ext4_has_feature_casefold(sb)) { ext4_msg(sb, KERN_ERR, "Filesystem with casefold feature cannot be " @@ -4613,7 +4613,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) if (err < 0) goto failed_mount; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (ext4_has_feature_casefold(sb) && !sb->s_encoding) { const struct ext4_sb_encodings *encoding_info; struct unicode_map *encoding; @@ -5517,7 +5517,7 @@ failed_mount: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); #endif diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index f61e65ae27d8..d233c24ea342 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -309,7 +309,7 @@ EXT4_ATTR_FEATURE(meta_bg_resize); EXT4_ATTR_FEATURE(encryption); EXT4_ATTR_FEATURE(test_dummy_encryption_v2); #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) EXT4_ATTR_FEATURE(casefold); #endif #ifdef CONFIG_FS_VERITY @@ -317,7 +317,7 @@ EXT4_ATTR_FEATURE(verity); #endif EXT4_ATTR_FEATURE(metadata_csum_seed); EXT4_ATTR_FEATURE(fast_commit); -#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) +#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) EXT4_ATTR_FEATURE(encrypted_casefold); #endif @@ -329,7 +329,7 @@ static struct attribute *ext4_feat_attrs[] = { ATTR_LIST(encryption), ATTR_LIST(test_dummy_encryption_v2), #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) ATTR_LIST(casefold), #endif #ifdef CONFIG_FS_VERITY @@ -337,7 +337,7 @@ static struct attribute *ext4_feat_attrs[] = { #endif ATTR_LIST(metadata_csum_seed), ATTR_LIST(fast_commit), -#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) +#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) ATTR_LIST(encrypted_casefold), #endif NULL, diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 1820e9c106f7..166f08623362 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -16,7 +16,7 @@ #include "xattr.h" #include -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) extern struct kmem_cache *f2fs_cf_name_slab; #endif @@ -79,7 +79,7 @@ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de) int f2fs_init_casefolded_name(const struct inode *dir, struct 
f2fs_filename *fname) { -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) struct super_block *sb = dir->i_sb; if (IS_CASEFOLDED(dir)) { @@ -174,7 +174,7 @@ void f2fs_free_filename(struct f2fs_filename *fname) kfree(fname->crypto_buf.name); fname->crypto_buf.name = NULL; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (fname->cf_name.name) { kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name); fname->cf_name.name = NULL; @@ -208,7 +208,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, return f2fs_find_target_dentry(&d, fname, max_slots); } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* * Test whether a case-insensitive directory entry matches the filename * being searched for. @@ -266,7 +266,7 @@ static inline int f2fs_match_name(const struct inode *dir, { struct fscrypt_name f; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (fname->cf_name.name) { struct qstr cf = FSTR_TO_QSTR(&fname->cf_name); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d0d603187171..4da88928ffb5 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -487,7 +487,7 @@ struct f2fs_filename { */ struct fscrypt_str crypto_buf; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* * For casefolded directories: the casefolded name, but it's left NULL * if the original name is not valid Unicode, if the directory is both diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index e3beac546c63..3cb1e7a24740 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -105,7 +105,7 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname) return; } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (IS_CASEFOLDED(dir)) { /* * If the casefolded name is provided, hash it instead of the diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index a728a0af9ce0..5f213f05556d 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -561,7 +561,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, goto out_iput; } out_splice: -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (!inode && IS_CASEFOLDED(dir)) { /* Eventually we want to call d_add_ci(dentry, NULL) * for negative dentries in the encoding case as @@ -622,7 +622,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) goto fail; } f2fs_delete_entry(de, page, dir, inode); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. Eventually we'll want avoid * invalidating the dentries here, alongside with returning the diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index d1664a0567ef..2fbbc820c00a 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -46,7 +46,7 @@ static struct kmem_cache *fsync_entry_slab; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) extern struct kmem_cache *f2fs_cf_name_slab; #endif @@ -149,7 +149,7 @@ static int init_recovered_filename(const struct inode *dir, if (err) return err; f2fs_hash_filename(dir, fname); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* Case-sensitive match is fine for recovery */ kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name); fname->cf_name.name = NULL; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 15f12ece0ac6..b870c6459fa1 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -256,7 +256,7 @@ void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...) 
va_end(args); } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) static const struct f2fs_sb_encodings { __u16 magic; char *name; @@ -1218,7 +1218,7 @@ default_check: return -EINVAL; } #endif -#ifndef CONFIG_UNICODE +#if !IS_ENABLED(CONFIG_UNICODE) if (f2fs_sb_has_casefold(sbi)) { f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE"); @@ -1578,7 +1578,7 @@ static void f2fs_put_super(struct super_block *sb) f2fs_destroy_iostat(sbi); for (i = 0; i < NR_PAGE_TYPE; i++) kvfree(sbi->write_io[i]); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); #endif kfree(sbi); @@ -3861,7 +3861,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) { -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) { const struct f2fs_sb_encodings *encoding_info; struct unicode_map *encoding; @@ -4412,7 +4412,7 @@ free_bio_info: for (i = 0; i < NR_PAGE_TYPE; i++) kvfree(sbi->write_io[i]); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); sb->s_encoding = NULL; #endif diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 8408f77764e8..fa3d9cb2d69b 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -192,7 +192,7 @@ static ssize_t unusable_show(struct f2fs_attr *a, static ssize_t encoding_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) struct super_block *sb = sbi->sb; if (f2fs_sb_has_casefold(sbi)) @@ -756,7 +756,7 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks); #ifdef CONFIG_FS_ENCRYPTION F2FS_FEATURE_RO_ATTR(encryption); F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) F2FS_FEATURE_RO_ATTR(encrypted_casefold); #endif #endif /* CONFIG_FS_ENCRYPTION */ @@ -775,7 +775,7 @@ F2FS_FEATURE_RO_ATTR(lost_found); F2FS_FEATURE_RO_ATTR(verity); #endif F2FS_FEATURE_RO_ATTR(sb_checksum); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) F2FS_FEATURE_RO_ATTR(casefold); #endif F2FS_FEATURE_RO_ATTR(readonly); @@ -886,7 +886,7 @@ static struct attribute *f2fs_feat_attrs[] = { #ifdef CONFIG_FS_ENCRYPTION ATTR_LIST(encryption), ATTR_LIST(test_dummy_encryption_v2), -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) ATTR_LIST(encrypted_casefold), #endif #endif /* CONFIG_FS_ENCRYPTION */ @@ -905,7 +905,7 @@ static struct attribute *f2fs_feat_attrs[] = { ATTR_LIST(verity), #endif ATTR_LIST(sb_checksum), -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) ATTR_LIST(casefold), #endif ATTR_LIST(readonly), diff --git a/fs/libfs.c b/fs/libfs.c index ba7438ab9371..974125270a42 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -1379,7 +1379,7 @@ bool is_empty_dir_inode(struct inode *inode) (inode->i_op == &empty_dir_inode_operations); } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* * Determine if the name of a dentry should be casefolded. 
* @@ -1473,7 +1473,7 @@ static const struct dentry_operations generic_encrypted_dentry_ops = { }; #endif -#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE) +#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE) static const struct dentry_operations generic_encrypted_ci_dentry_ops = { .d_hash = generic_ci_d_hash, .d_compare = generic_ci_d_compare, @@ -1508,10 +1508,10 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry) #ifdef CONFIG_FS_ENCRYPTION bool needs_encrypt_ops = dentry->d_flags & DCACHE_NOKEY_NAME; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) bool needs_ci_ops = dentry->d_sb->s_encoding; #endif -#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE) +#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE) if (needs_encrypt_ops && needs_ci_ops) { d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops); return; @@ -1523,7 +1523,7 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry) return; } #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (needs_ci_ops) { d_set_d_op(dentry, &generic_ci_dentry_ops); return;
diff --git a/fs/unicode/Kconfig b/fs/unicode/Kconfig index 610d7bc05d6e..da786a687fdc 100644 --- a/fs/unicode/Kconfig +++ b/fs/unicode/Kconfig @@ -3,21 +3,13 @@ # UTF-8 normalization # config UNICODE - bool "UTF-8 normalization and casefolding support" + tristate "UTF-8 normalization and casefolding support" help Say Y here to enable UTF-8 NFD normalization and NFD+CF casefolding - support. -config UNICODE_UTF8_DATA - tristate "UTF-8 normalization and casefolding tables" - depends on UNICODE - default UNICODE - help - This contains a large table of case foldings, which can be loaded as - a separate module if you say M here. To be on the safe side stick - to the default of Y. Saying N here makes no sense, if you do not want - utf8 casefolding support, disable CONFIG_UNICODE instead. + support. If you say M here the large table of case foldings will + be a separate loadable module that gets requested only when a file + system actually use it. config UNICODE_NORMALIZATION_SELFTEST tristate "Test UTF-8 normalization support" - depends on UNICODE_UTF8_DATA + depends on UNICODE
diff --git a/fs/unicode/Makefile b/fs/unicode/Makefile index 2f9d9188852b..0cc87423de82 100644 --- a/fs/unicode/Makefile +++ b/fs/unicode/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_UNICODE) += unicode.o +ifneq ($(CONFIG_UNICODE),) +obj-y += unicode.o +endif +obj-$(CONFIG_UNICODE) += utf8data.o obj-$(CONFIG_UNICODE_NORMALIZATION_SELFTEST) += utf8-selftest.o -obj-$(CONFIG_UNICODE_UTF8_DATA) += utf8data.o unicode-y := utf8-norm.o utf8-core.o
diff --git a/include/linux/fs.h b/include/linux/fs.h index c8510da6cc6d..fdac22d16c2b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1490,7 +1490,7 @@ struct super_block { #ifdef CONFIG_FS_VERITY const struct fsverity_operations *s_vop; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) struct unicode_map *s_encoding; __u16 s_encoding_flags; #endif -- cgit v1.2.3-71-gd317
From 748cd5729ac7421091316e32dcdffb0578563880 Mon Sep 17 00:00:00 2001 From: Di Zhu Date: Wed, 19 Jan 2022 09:40:04 +0800 Subject: bpf: support BPF_PROG_QUERY for progs attached to sockmap
Right now there is no way to query whether BPF programs are attached to a sockmap or not. We can use the standard interface in libbpf to query, such as: bpf_prog_query(mapFd, BPF_SK_SKB_STREAM_PARSER, 0, NULL, ...); where mapFd is the fd of the sockmap.
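A hedged user-space sketch of such a query with libbpf (the attach type chosen and the origin of map_fd are assumptions for this sketch):

#include <stdio.h>
#include <bpf/bpf.h>

/* map_fd is assumed to be a valid sockmap fd obtained elsewhere. */
static void show_attached_prog(int map_fd)
{
	__u32 prog_ids[1] = {};
	__u32 prog_cnt = 1;

	if (bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0,
			   NULL, prog_ids, &prog_cnt)) {
		perror("bpf_prog_query");
		return;
	}
	if (prog_cnt)
		printf("attached prog id: %u\n", prog_ids[0]);
	else
		printf("no prog attached\n");
}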
Signed-off-by: Di Zhu Acked-by: Yonghong Song Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/r/20220119014005.1209-1-zhudi2@huawei.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 9 ++++++ kernel/bpf/syscall.c | 5 ++++ net/core/sock_map.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 84 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index dce54eb0aae8..80e3387ea3af 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2069,6 +2069,9 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog, int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); +int sock_map_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr); + void sock_map_unhash(struct sock *sk); void sock_map_close(struct sock *sk, long timeout); #else @@ -2122,6 +2125,12 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void { return -EOPNOTSUPP; } + +static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} #endif /* CONFIG_BPF_SYSCALL */ #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index fa4505f9b611..9e0631f091a6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3318,6 +3318,11 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_FLOW_DISSECTOR: case BPF_SK_LOOKUP: return netns_bpf_prog_query(attr, uattr); + case BPF_SK_SKB_STREAM_PARSER: + case BPF_SK_SKB_STREAM_VERDICT: + case BPF_SK_MSG_VERDICT: + case BPF_SK_SKB_VERDICT: + return sock_map_bpf_prog_query(attr, uattr); default: return -EINVAL; } diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 1827669eedd6..2d213c4011db 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -1416,38 +1416,50 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) return NULL; } -static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, - struct bpf_prog *old, u32 which) +static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog, + u32 which) { struct sk_psock_progs *progs = sock_map_progs(map); - struct bpf_prog **pprog; if (!progs) return -EOPNOTSUPP; switch (which) { case BPF_SK_MSG_VERDICT: - pprog = &progs->msg_parser; + *pprog = &progs->msg_parser; break; #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) case BPF_SK_SKB_STREAM_PARSER: - pprog = &progs->stream_parser; + *pprog = &progs->stream_parser; break; #endif case BPF_SK_SKB_STREAM_VERDICT: if (progs->skb_verdict) return -EBUSY; - pprog = &progs->stream_verdict; + *pprog = &progs->stream_verdict; break; case BPF_SK_SKB_VERDICT: if (progs->stream_verdict) return -EBUSY; - pprog = &progs->skb_verdict; + *pprog = &progs->skb_verdict; break; default: return -EOPNOTSUPP; } + return 0; +} + +static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + struct bpf_prog *old, u32 which) +{ + struct bpf_prog **pprog; + int ret; + + ret = sock_map_prog_lookup(map, &pprog, which); + if (ret) + return ret; + if (old) return psock_replace_prog(pprog, prog, old); @@ -1455,6 +1467,57 @@ static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, return 0; } +int sock_map_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ 
+ __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd; + struct bpf_prog **pprog; + struct bpf_prog *prog; + struct bpf_map *map; + struct fd f; + u32 id = 0; + int ret; + + if (attr->query.query_flags) + return -EINVAL; + + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + rcu_read_lock(); + + ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type); + if (ret) + goto end; + + prog = *pprog; + prog_cnt = !prog ? 0 : 1; + + if (!attr->query.prog_cnt || !prog_ids || !prog_cnt) + goto end; + + /* we do not hold the refcnt, the bpf prog may be released + * asynchronously and the id would be set to 0. + */ + id = data_race(prog->aux->id); + if (id == 0) + prog_cnt = 0; + +end: + rcu_read_unlock(); + + if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) || + (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) || + copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) + ret = -EFAULT; + + fdput(f); + return ret; +} + static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link) { switch (link->map->map_type) { -- cgit v1.2.3-71-gd317
From d16697cb6261d4cc23422e6b1cb2759df8aa76d0 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 21 Jan 2022 11:09:44 +0100 Subject: net: skbuff: add size metadata to skb_shared_info for xdp
Introduce the xdp_frags_size field in the skb_shared_info data structure to store the xdp_buff/xdp_frame paged size (xdp_frags_size will be used in xdp frags support). In order not to increase the skb_shared_info size we will use a hole due to skb_shared_info alignment.
Acked-by: Toke Hoiland-Jorgensen Acked-by: John Fastabend Acked-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi Link: https://lore.kernel.org/r/8a849819a3e0a143d540f78a3a5add76e17e980d.1642758637.git.lorenzo@kernel.org Signed-off-by: Alexei Starovoitov --- include/linux/skbuff.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux')
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bf11e1fbd69b..8131d0de7559 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -557,6 +557,7 @@ struct skb_shared_info { * Warning : all fields before dataref are cleared in __alloc_skb() */ atomic_t dataref; + unsigned int xdp_frags_size; /* Intermediate layers must ensure that destructor_arg * remains valid until skb destructor */ -- cgit v1.2.3-71-gd317
From c2f2cdbeffda7b153c19e0f3d73149c41026c0db Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 21 Jan 2022 11:09:52 +0100 Subject: bpf: introduce BPF_F_XDP_HAS_FRAGS flag in prog_flags loading the ebpf program
Introduce BPF_F_XDP_HAS_FRAGS and the related field in bpf_prog_aux in order to notify the driver that the loaded program supports xdp frags.
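A hedged sketch of opting a program into this at load time (assumes a libbpf version that provides bpf_prog_load() with opts; the program name is illustrative):

#include <bpf/bpf.h>

/* insns/insn_cnt are assumed to hold a verifier-ready XDP program. */
static int load_frags_aware_xdp(const struct bpf_insn *insns,
				size_t insn_cnt)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		    .prog_flags = BPF_F_XDP_HAS_FRAGS);

	return bpf_prog_load(BPF_PROG_TYPE_XDP, "xdp_frags_sketch",
			     "GPL", insns, insn_cnt, &opts);
}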
Acked-by: Toke Hoiland-Jorgensen Acked-by: John Fastabend Signed-off-by: Lorenzo Bianconi Link: https://lore.kernel.org/r/db2e8075b7032a356003f407d1b0deb99adaa0ed.1642758637.git.lorenzo@kernel.org Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 5 +++++ kernel/bpf/syscall.c | 4 +++- tools/include/uapi/linux/bpf.h | 5 +++++ 4 files changed, 14 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 80e3387ea3af..e93ed028a030 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -933,6 +933,7 @@ struct bpf_prog_aux { bool func_proto_unreliable; bool sleepable; bool tail_call_reachable; + bool xdp_has_frags; struct hlist_node tramp_hlist; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index fe2272defcd9..945649c67e03 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1113,6 +1113,11 @@ enum bpf_link_type { */ #define BPF_F_SLEEPABLE (1U << 4) +/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program + * fully support xdp frags. + */ +#define BPF_F_XDP_HAS_FRAGS (1U << 5) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 9e0631f091a6..f29090643c6e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2217,7 +2217,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) BPF_F_ANY_ALIGNMENT | BPF_F_TEST_STATE_FREQ | BPF_F_SLEEPABLE | - BPF_F_TEST_RND_HI32)) + BPF_F_TEST_RND_HI32 | + BPF_F_XDP_HAS_FRAGS)) return -EINVAL; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && @@ -2303,6 +2304,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) prog->aux->dst_prog = dst_prog; prog->aux->offload_requested = !!attr->prog_ifindex; prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; + prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; err = security_bpf_prog_alloc(prog->aux); if (err) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index fe2272defcd9..945649c67e03 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1113,6 +1113,11 @@ enum bpf_link_type { */ #define BPF_F_SLEEPABLE (1U << 4) +/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program + * fully support xdp frags. + */ +#define BPF_F_XDP_HAS_FRAGS (1U << 5) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * -- cgit v1.2.3-71-gd317 From f45d5b6ce2e835834c94b8b700787984f02cd662 Mon Sep 17 00:00:00 2001 From: Toke Hoiland-Jorgensen Date: Fri, 21 Jan 2022 11:10:02 +0100 Subject: bpf: generalise tail call map compatibility check The check for tail call map compatibility ensures that tail calls only happen between maps of the same type. To ensure backwards compatibility for XDP frags we need a similar type of check for cpumap and devmap programs, so move the state from bpf_array_aux into bpf_map, add xdp_has_frags to the check, and apply the same check to cpumap and devmap. 
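For illustration only (a hypothetical user-space sequence, not from this patch, assuming libbpf's bpf_map_update_elem() wrapper and two pre-loaded XDP program FDs): once a first program is stored in a cpumap, the map's owner record captures its type, JITed flag and now xdp_has_frags, and a second program that differs in any of them is refused:

	/* Hypothetical sketch: cpu_map_fd, cpu key and error handling omitted. */
	struct bpf_cpumap_val val = {
		.qsize = 192,
		.bpf_prog.fd = frags_prog_fd,	/* loaded with BPF_F_XDP_HAS_FRAGS */
	};

	/* First update claims ownership of the map. */
	bpf_map_update_elem(cpu_map_fd, &cpu, &val, 0);

	val.bpf_prog.fd = legacy_prog_fd;	/* loaded without the flag */
	/* Now fails with -EINVAL: owner.xdp_has_frags no longer matches. */
	bpf_map_update_elem(cpu_map_fd, &cpu, &val, 0);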
Acked-by: John Fastabend Co-developed-by: Lorenzo Bianconi Signed-off-by: Lorenzo Bianconi Signed-off-by: Toke Hoiland-Jorgensen Link: https://lore.kernel.org/r/f19fd97c0328a39927f3ad03e1ca6b43fd53cdfd.1642758637.git.lorenzo@kernel.org Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 30 +++++++++++++++++++----------- kernel/bpf/arraymap.c | 4 +--- kernel/bpf/core.c | 28 ++++++++++++++-------------- kernel/bpf/cpumap.c | 8 +++++--- kernel/bpf/devmap.c | 3 ++- kernel/bpf/syscall.c | 15 +++++++-------- 6 files changed, 48 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e93ed028a030..e8ec8d2f2fe3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -194,6 +194,17 @@ struct bpf_map { struct work_struct work; struct mutex freeze_mutex; atomic64_t writecnt; + /* 'Ownership' of program-containing map is claimed by the first program + * that is going to use this map or by the first program which FD is + * stored in the map to make sure that all callers and callees have the + * same prog type, JITed flag and xdp_has_frags flag. + */ + struct { + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + } owner; }; static inline bool map_value_has_spin_lock(const struct bpf_map *map) @@ -994,16 +1005,6 @@ struct bpf_prog_aux { }; struct bpf_array_aux { - /* 'Ownership' of prog array is claimed by the first program that - * is going to use this map or by the first program which FD is - * stored in the map to make sure that all callers and callees have - * the same prog type and JITed flag. - */ - struct { - spinlock_t lock; - enum bpf_prog_type type; - bool jited; - } owner; /* Programs with direct jumps into programs part of this array. */ struct list_head poke_progs; struct bpf_map *map; @@ -1178,7 +1179,14 @@ struct bpf_event_entry { struct rcu_head rcu; }; -bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); +static inline bool map_type_contains_progs(struct bpf_map *map) +{ + return map->map_type == BPF_MAP_TYPE_PROG_ARRAY || + map->map_type == BPF_MAP_TYPE_DEVMAP || + map->map_type == BPF_MAP_TYPE_CPUMAP; +} + +bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp); int bpf_prog_calc_tag(struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index c7a5be3bf8be..7f145aefbff8 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -837,13 +837,12 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key) static void *prog_fd_array_get_ptr(struct bpf_map *map, struct file *map_file, int fd) { - struct bpf_array *array = container_of(map, struct bpf_array, map); struct bpf_prog *prog = bpf_prog_get(fd); if (IS_ERR(prog)) return prog; - if (!bpf_prog_array_compatible(array, prog)) { + if (!bpf_prog_map_compatible(map, prog)) { bpf_prog_put(prog); return ERR_PTR(-EINVAL); } @@ -1071,7 +1070,6 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) INIT_WORK(&aux->work, prog_array_map_clear_deferred); INIT_LIST_HEAD(&aux->poke_progs); mutex_init(&aux->poke_mutex); - spin_lock_init(&aux->owner.lock); map = array_map_alloc(attr); if (IS_ERR(map)) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index de3e5bc6781f..0a1cfd8544b9 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1829,28 +1829,30 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx, } #endif -bool bpf_prog_array_compatible(struct 
bpf_array *array, - const struct bpf_prog *fp) +bool bpf_prog_map_compatible(struct bpf_map *map, + const struct bpf_prog *fp) { bool ret; if (fp->kprobe_override) return false; - spin_lock(&array->aux->owner.lock); - - if (!array->aux->owner.type) { + spin_lock(&map->owner.lock); + if (!map->owner.type) { /* There's no owner yet where we could check for * compatibility. */ - array->aux->owner.type = fp->type; - array->aux->owner.jited = fp->jited; + map->owner.type = fp->type; + map->owner.jited = fp->jited; + map->owner.xdp_has_frags = fp->aux->xdp_has_frags; ret = true; } else { - ret = array->aux->owner.type == fp->type && - array->aux->owner.jited == fp->jited; + ret = map->owner.type == fp->type && + map->owner.jited == fp->jited && + map->owner.xdp_has_frags == fp->aux->xdp_has_frags; } - spin_unlock(&array->aux->owner.lock); + spin_unlock(&map->owner.lock); + return ret; } @@ -1862,13 +1864,11 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) mutex_lock(&aux->used_maps_mutex); for (i = 0; i < aux->used_map_cnt; i++) { struct bpf_map *map = aux->used_maps[i]; - struct bpf_array *array; - if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) + if (!map_type_contains_progs(map)) continue; - array = container_of(map, struct bpf_array, map); - if (!bpf_prog_array_compatible(array, fp)) { + if (!bpf_prog_map_compatible(map, fp)) { ret = -EINVAL; goto out; } diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index b3e6b9422238..650e5d21f90d 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -397,7 +397,8 @@ static int cpu_map_kthread_run(void *data) return 0; } -static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd) +static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, + struct bpf_map *map, int fd) { struct bpf_prog *prog; @@ -405,7 +406,8 @@ static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd) if (IS_ERR(prog)) return PTR_ERR(prog); - if (prog->expected_attach_type != BPF_XDP_CPUMAP) { + if (prog->expected_attach_type != BPF_XDP_CPUMAP || + !bpf_prog_map_compatible(map, prog)) { bpf_prog_put(prog); return -EINVAL; } @@ -457,7 +459,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, rcpu->map_id = map->id; rcpu->value.qsize = value->qsize; - if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd)) + if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd)) goto free_ptr_ring; /* Setup kthread */ diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index fe019dbdb3f0..038f6d7a83e4 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -858,7 +858,8 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net, BPF_PROG_TYPE_XDP, false); if (IS_ERR(prog)) goto err_put_dev; - if (prog->expected_attach_type != BPF_XDP_DEVMAP) + if (prog->expected_attach_type != BPF_XDP_DEVMAP || + !bpf_prog_map_compatible(&dtab->map, prog)) goto err_put_prog; } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index f29090643c6e..72ce1edde950 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -556,16 +556,14 @@ static unsigned long bpf_map_memory_footprint(const struct bpf_map *map) static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) { - const struct bpf_map *map = filp->private_data; - const struct bpf_array *array; + struct bpf_map *map = filp->private_data; u32 type = 0, jited = 0; - if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { - array = container_of(map, struct bpf_array, map); - spin_lock(&array->aux->owner.lock); - type = 
array->aux->owner.type; - jited = array->aux->owner.jited; - spin_unlock(&array->aux->owner.lock); + if (map_type_contains_progs(map)) { + spin_lock(&map->owner.lock); + type = map->owner.type; + jited = map->owner.jited; + spin_unlock(&map->owner.lock); } seq_printf(m, @@ -874,6 +872,7 @@ static int map_create(union bpf_attr *attr) atomic64_set(&map->refcnt, 1); atomic64_set(&map->usercnt, 1); mutex_init(&map->freeze_mutex); + spin_lock_init(&map->owner.lock); map->spin_lock_off = -EINVAL; map->timer_off = -EINVAL; -- cgit v1.2.3-71-gd317 From 96489c1c0b53131b0e1ec33e2060538379ad6152 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 16 Dec 2021 12:16:38 +0100 Subject: mtd: nand: ecc: Add infrastructure to support hardware engines Add the necessary helpers to register/unregister hardware ECC engines that will be called from ECC engine drivers. Also add helpers to get the right engine from the user perspective. Keep a reference of the in use ECC engine in order to prevent modules to be unloaded. Put the reference when the engine gets retired. A static list of hardware (only) ECC engines is setup to keep track of the registered engines. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20211216111654.238086-13-miquel.raynal@bootlin.com --- drivers/mtd/nand/core.c | 10 ++++-- drivers/mtd/nand/ecc.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mtd/nand.h | 28 +++++++++++++++ 3 files changed, 123 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c index 416947f28b67..ff34e3fd84c0 100644 --- a/drivers/mtd/nand/core.c +++ b/drivers/mtd/nand/core.c @@ -235,7 +235,9 @@ static int nanddev_get_ecc_engine(struct nand_device *nand) nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand); break; case NAND_ECC_ENGINE_TYPE_ON_HOST: - pr_err("On-host hardware ECC engines not supported yet\n"); + nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand); + if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER) + return -EPROBE_DEFER; break; default: pr_err("Missing ECC engine type\n"); @@ -255,7 +257,7 @@ static int nanddev_put_ecc_engine(struct nand_device *nand) { switch (nand->ecc.ctx.conf.engine_type) { case NAND_ECC_ENGINE_TYPE_ON_HOST: - pr_err("On-host hardware ECC engines not supported yet\n"); + nand_ecc_put_on_host_hw_engine(nand); break; case NAND_ECC_ENGINE_TYPE_NONE: case NAND_ECC_ENGINE_TYPE_SOFT: @@ -300,7 +302,9 @@ int nanddev_ecc_engine_init(struct nand_device *nand) /* Look for the ECC engine to use */ ret = nanddev_get_ecc_engine(nand); if (ret) { - pr_err("No ECC engine found\n"); + if (ret != -EPROBE_DEFER) + pr_err("No ECC engine found\n"); + return ret; } diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c index 6c43dfda01d4..078f5ec38de3 100644 --- a/drivers/mtd/nand/ecc.c +++ b/drivers/mtd/nand/ecc.c @@ -96,6 +96,12 @@ #include #include #include +#include +#include +#include + +static LIST_HEAD(on_host_hw_engines); +static DEFINE_MUTEX(on_host_hw_engines_mutex); /** * nand_ecc_init_ctx - Init the ECC engine context @@ -611,6 +617,88 @@ struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand) } EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine); +int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine) +{ + struct nand_ecc_engine *item; + + if (!engine) + return -EINVAL; + + /* Prevent multiple registrations of one engine */ + list_for_each_entry(item, &on_host_hw_engines, node) + if (item == engine) + return 0; + + 
mutex_lock(&on_host_hw_engines_mutex); + list_add_tail(&engine->node, &on_host_hw_engines); + mutex_unlock(&on_host_hw_engines_mutex); + + return 0; +} +EXPORT_SYMBOL(nand_ecc_register_on_host_hw_engine); + +int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine) +{ + if (!engine) + return -EINVAL; + + mutex_lock(&on_host_hw_engines_mutex); + list_del(&engine->node); + mutex_unlock(&on_host_hw_engines_mutex); + + return 0; +} +EXPORT_SYMBOL(nand_ecc_unregister_on_host_hw_engine); + +static struct nand_ecc_engine *nand_ecc_match_on_host_hw_engine(struct device *dev) +{ + struct nand_ecc_engine *item; + + list_for_each_entry(item, &on_host_hw_engines, node) + if (item->dev == dev) + return item; + + return NULL; +} + +struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand) +{ + struct nand_ecc_engine *engine = NULL; + struct device *dev = &nand->mtd.dev; + struct platform_device *pdev; + struct device_node *np; + + if (list_empty(&on_host_hw_engines)) + return NULL; + + /* Check for an explicit nand-ecc-engine property */ + np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0); + if (np) { + pdev = of_find_device_by_node(np); + if (!pdev) + return ERR_PTR(-EPROBE_DEFER); + + engine = nand_ecc_match_on_host_hw_engine(&pdev->dev); + platform_device_put(pdev); + of_node_put(np); + + if (!engine) + return ERR_PTR(-EPROBE_DEFER); + } + + if (engine) + get_device(engine->dev); + + return engine; +} +EXPORT_SYMBOL(nand_ecc_get_on_host_hw_engine); + +void nand_ecc_put_on_host_hw_engine(struct nand_device *nand) +{ + put_device(nand->ecc.engine->dev); +} +EXPORT_SYMBOL(nand_ecc_put_on_host_hw_engine); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Miquel Raynal "); MODULE_DESCRIPTION("Generic ECC engine"); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 32fc7edf65b3..4ddd20fe9c9e 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -263,12 +263,36 @@ struct nand_ecc_engine_ops { struct nand_page_io_req *req); }; +/** + * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated + * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value + * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly + * correction, does not need to copy + * data around + * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the + * data into its own area before use + */ +enum nand_ecc_engine_integration { + NAND_ECC_ENGINE_INTEGRATION_INVALID, + NAND_ECC_ENGINE_INTEGRATION_PIPELINED, + NAND_ECC_ENGINE_INTEGRATION_EXTERNAL, +}; + /** * struct nand_ecc_engine - ECC engine abstraction for NAND devices + * @dev: Host device + * @node: Private field for registration time * @ops: ECC engine operations + * @integration: How the engine is integrated with the host + * (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines) + * @priv: Private data */ struct nand_ecc_engine { + struct device *dev; + struct list_head node; struct nand_ecc_engine_ops *ops; + enum nand_ecc_engine_integration integration; + void *priv; }; void of_get_nand_ecc_user_config(struct nand_device *nand); @@ -279,8 +303,12 @@ int nand_ecc_prepare_io_req(struct nand_device *nand, int nand_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req); bool nand_ecc_is_strong_enough(struct nand_device *nand); +int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine); +int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine); struct nand_ecc_engine *nand_ecc_get_sw_engine(struct 
nand_device *nand); struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand); +struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand); +void nand_ecc_put_on_host_hw_engine(struct nand_device *nand); #if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING) struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void); -- cgit v1.2.3-71-gd317 From cda32a618debd3fad8e42757b198719ae180f8f4 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 16 Dec 2021 12:16:39 +0100 Subject: mtd: nand: Add a new helper to retrieve the ECC context Introduce nand_to_ecc_ctx() which will allow to easily jump to the private pointer of an ECC context given a NAND device. This is very handy, from the prepare or finish ECC hook, to get the internal context out of the NAND device object. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20211216111654.238086-14-miquel.raynal@bootlin.com --- include/linux/mtd/nand.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 4ddd20fe9c9e..b617efa0a881 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -990,6 +990,11 @@ int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); int nanddev_ecc_engine_init(struct nand_device *nand); void nanddev_ecc_engine_cleanup(struct nand_device *nand); +static inline void *nand_to_ecc_ctx(struct nand_device *nand) +{ + return nand->ecc.ctx.priv; +} + /* BBT related functions */ enum nand_bbt_block_status { NAND_BBT_BLOCK_STATUS_UNKNOWN, -- cgit v1.2.3-71-gd317 From 02d1d0e4dfc3fdc5aa05b78e7def00dc1e62257e Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 7 Jan 2022 10:46:11 -0800 Subject: mtd: rawnand: brcmnand: Add platform data structure for BCMA Update the BCMA's chipcommon nand flash driver to detect which chip-select is used and pass that information via platform data to the brcmnand driver. Make sure that the brcmnand platform data structure is always at the beginning of the platform data of the "nflash" device created by BCMA to allow brcmnand to safely de-reference it. Signed-off-by: Florian Fainelli Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20220107184614.2670254-7-f.fainelli@gmail.com --- MAINTAINERS | 1 + drivers/bcma/driver_chipcommon_nflash.c | 20 +++++++++++++++++++- include/linux/bcma/bcma_driver_chipcommon.h | 5 +++++ include/linux/platform_data/brcmnand.h | 12 ++++++++++++ 4 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 include/linux/platform_data/brcmnand.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index ea3e6c914384..8a1c1e92937f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4021,6 +4021,7 @@ L: linux-mtd@lists.infradead.org L: bcm-kernel-feedback-list@broadcom.com S: Maintained F: drivers/mtd/nand/raw/brcmnand/ +F: include/linux/platform_data/brcmnand.h BROADCOM STB PCIE DRIVER M: Jim Quinlan diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c index d4f699aef8c4..a1a814750b4a 100644 --- a/drivers/bcma/driver_chipcommon_nflash.c +++ b/drivers/bcma/driver_chipcommon_nflash.c @@ -7,18 +7,28 @@ #include "bcma_private.h" +#include #include +#include #include +/* Alternate NAND controller driver name in order to allow both bcm47xxnflash + * and bcma_brcmnand to be built into the same kernel image. 
+ */ +static const char *bcma_nflash_alt_name = "bcma_brcmnand"; + struct platform_device bcma_nflash_dev = { .name = "bcma_nflash", .num_resources = 0, }; +static const char *probes[] = { "bcm47xxpart", NULL }; + /* Initialize NAND flash access */ int bcma_nflash_init(struct bcma_drv_cc *cc) { struct bcma_bus *bus = cc->core->bus; + u32 reg; if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && cc->core->id.rev != 38) { @@ -33,8 +43,16 @@ int bcma_nflash_init(struct bcma_drv_cc *cc) cc->nflash.present = true; if (cc->core->id.rev == 38 && - (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT)) + (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT)) { cc->nflash.boot = true; + /* Determine the chip select that is being used */ + reg = bcma_cc_read32(cc, BCMA_CC_NAND_CS_NAND_SELECT) & 0xff; + cc->nflash.brcmnand_info.chip_select = ffs(reg) - 1; + cc->nflash.brcmnand_info.part_probe_types = probes; + cc->nflash.brcmnand_info.ecc_stepsize = 512; + cc->nflash.brcmnand_info.ecc_strength = 1; + bcma_nflash_dev.name = bcma_nflash_alt_name; + } /* Prepare platform device, but don't register it yet. It's too early, * malloc (required by device_private_init) is not available yet. */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index d35b9206096d..e3314f746bfa 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -3,6 +3,7 @@ #define LINUX_BCMA_DRIVER_CC_H_ #include +#include #include /** ChipCommon core registers. **/ @@ -599,6 +600,10 @@ struct bcma_sflash { #ifdef CONFIG_BCMA_NFLASH struct bcma_nflash { + /* Must be the fist member for the brcmnand driver to + * de-reference that structure. + */ + struct brcmnand_platform_data brcmnand_info; bool present; bool boot; /* This is the flash the SoC boots from */ }; diff --git a/include/linux/platform_data/brcmnand.h b/include/linux/platform_data/brcmnand.h new file mode 100644 index 000000000000..8b8777985dce --- /dev/null +++ b/include/linux/platform_data/brcmnand.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef BRCMNAND_PLAT_DATA_H +#define BRCMNAND_PLAT_DATA_H + +struct brcmnand_platform_data { + int chip_select; + const char * const *part_probe_types; + unsigned int ecc_stepsize; + unsigned int ecc_strength; +}; + +#endif /* BRCMNAND_PLAT_DATA_H */ -- cgit v1.2.3-71-gd317 From 1e73d7f689c7a8fa13f78fe8d6be908fdceef17a Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 15 Dec 2021 16:13:35 +0100 Subject: iio: core: Fix the kernel doc regarding the currentmode iio_dev entry This is an internal variable, which should be accessed in a very sporadic way and in no case changed by any device driver. 
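For illustration (hypothetical driver code, not from this patch): reading the field to gate an operation is the sporadic access alluded to above, while assigning to it from a driver is exactly what the [INTERN] marking forbids:

	/* Fine: read-only use in a driver. */
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
		return -EBUSY;

	/* Wrong: currentmode is core-internal state, never set it here. */
	indio_dev->currentmode = INDIO_DIRECT_MODE;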
Signed-off-by: Miquel Raynal Reviewed-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20211215151344.163036-2-miquel.raynal@bootlin.com Signed-off-by: Jonathan Cameron --- include/linux/iio/iio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 07025d6b3de1..faf00f2c0be6 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -489,7 +489,7 @@ struct iio_buffer_setup_ops { /** * struct iio_dev - industrial I/O device * @modes: [DRIVER] operating modes supported by device - * @currentmode: [DRIVER] current operating mode + * @currentmode: [INTERN] current operating mode * @dev: [DRIVER] device structure, should be assigned a parent * and owner * @buffer: [DRIVER] any buffer present -- cgit v1.2.3-71-gd317 From da5936770517aef8b28888f1123fa654c78cc2f9 Mon Sep 17 00:00:00 2001 From: Nuno Sá Date: Sat, 22 Jan 2022 14:09:04 +0100 Subject: adis: simplify 'adis_update_bits' macros MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's no need to use '__builtin_choose_expr' to choose the right call to 'adis_update_bits_base()'. We can change the 'BUILD_BUG_ON()' condition so that it makes sure only the supported sizes are passed in. With that, we can just use 'sizeof(val)' as the size argument of 'adis_update_bits_base()'. Signed-off-by: Nuno Sá Link: https://lore.kernel.org/r/20220122130905.99-2-nuno.sa@analog.com Signed-off-by: Jonathan Cameron --- include/linux/iio/imu/adis.h | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 7c02f5292eea..11754f97d8bb 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -381,10 +381,8 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, * @val can lead to undesired behavior if the register to update is 16bit. */ #define adis_update_bits(adis, reg, mask, val) ({ \ - BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \ - __builtin_choose_expr(sizeof(val) == 4, \ - adis_update_bits_base(adis, reg, mask, val, 4), \ - adis_update_bits_base(adis, reg, mask, val, 2)); \ + BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \ + adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \ }) /** @@ -399,10 +397,8 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, * @val can lead to undesired behavior if the register to update is 16bit. */ #define __adis_update_bits(adis, reg, mask, val) ({ \ - BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \ - __builtin_choose_expr(sizeof(val) == 4, \ - __adis_update_bits_base(adis, reg, mask, val, 4), \ - __adis_update_bits_base(adis, reg, mask, val, 2)); \ + BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \ + __adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \ }) int adis_enable_irq(struct adis *adis, bool enable); -- cgit v1.2.3-71-gd317 From c39010ea6ba13bdf0003bd353e1d4c663aaac0a8 Mon Sep 17 00:00:00 2001 From: Nuno Sá Date: Sat, 22 Jan 2022 14:09:05 +0100 Subject: iio: adis: stylistic changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Minor stylistic changes to address checkpatch complaints when called with '--strict'.
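The nature of these changes can be seen in the diff below; as a before/after sketch (illustrative only, not a hunk from this patch), --strict asks for kernel integer types and continuation lines aligned to the open parenthesis:

	/* Before: both lines are flagged by checkpatch --strict. */
	uint16_t val;
	ret = adis_read_reg_16(adis, reg,
		&val);

	/* After: kernel u16 type, argument aligned to the open parenthesis. */
	u16 val;
	ret = adis_read_reg_16(adis, reg,
			       &val);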
Signed-off-by: Nuno Sá Link: https://lore.kernel.org/r/20220122130905.99-3-nuno.sa@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/imu/adis.c | 47 ++++++++++++++++++++++------------------- drivers/iio/imu/adis_buffer.c | 6 +++--- drivers/iio/imu/adis_trigger.c | 3 +-- include/linux/iio/imu/adis.h | 48 ++++++++++++++++++++++-------------------- 4 files changed, 54 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index cb0d66bf6561..638957001653 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -30,8 +30,8 @@ * @value: The value to write to device (up to 4 bytes) * @size: The size of the @value (in bytes) */ -int __adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int value, unsigned int size) +int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value, + unsigned int size) { unsigned int page = reg / ADIS_PAGE_SIZE; int ret, i; @@ -114,7 +114,7 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, ret = spi_sync(adis->spi, &msg); if (ret) { dev_err(&adis->spi->dev, "Failed to write register 0x%02X: %d\n", - reg, ret); + reg, ret); } else { adis->current_page = page; } @@ -130,8 +130,8 @@ EXPORT_SYMBOL_GPL(__adis_write_reg); * @val: The value read back from the device * @size: The size of the @val buffer */ -int __adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size) +int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, + unsigned int size) { unsigned int page = reg / ADIS_PAGE_SIZE; struct spi_message msg; @@ -201,12 +201,12 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, ret = spi_sync(adis->spi, &msg); if (ret) { dev_err(&adis->spi->dev, "Failed to read register 0x%02X: %d\n", - reg, ret); + reg, ret); return ret; - } else { - adis->current_page = page; } + adis->current_page = page; + switch (size) { case 4: *val = get_unaligned_be32(adis->rx); @@ -247,13 +247,13 @@ EXPORT_SYMBOL_GPL(__adis_update_bits_base); #ifdef CONFIG_DEBUG_FS -int adis_debugfs_reg_access(struct iio_dev *indio_dev, - unsigned int reg, unsigned int writeval, unsigned int *readval) +int adis_debugfs_reg_access(struct iio_dev *indio_dev, unsigned int reg, + unsigned int writeval, unsigned int *readval) { struct adis *adis = iio_device_get_drvdata(indio_dev); if (readval) { - uint16_t val16; + u16 val16; int ret; ret = adis_read_reg_16(adis, reg, &val16); @@ -261,9 +261,9 @@ int adis_debugfs_reg_access(struct iio_dev *indio_dev, *readval = val16; return ret; - } else { - return adis_write_reg_16(adis, reg, writeval); } + + return adis_write_reg_16(adis, reg, writeval); } EXPORT_SYMBOL(adis_debugfs_reg_access); @@ -279,14 +279,16 @@ EXPORT_SYMBOL(adis_debugfs_reg_access); int adis_enable_irq(struct adis *adis, bool enable) { int ret = 0; - uint16_t msc; + u16 msc; mutex_lock(&adis->state_lock); if (adis->data->enable_irq) { ret = adis->data->enable_irq(adis, enable); goto out_unlock; - } else if (adis->data->unmasked_drdy) { + } + + if (adis->data->unmasked_drdy) { if (enable) enable_irq(adis->spi->irq); else @@ -322,7 +324,7 @@ EXPORT_SYMBOL(adis_enable_irq); */ int __adis_check_status(struct adis *adis) { - uint16_t status; + u16 status; int ret; int i; @@ -358,7 +360,7 @@ int __adis_reset(struct adis *adis) const struct adis_timeout *timeouts = adis->data->timeouts; ret = __adis_write_reg_8(adis, adis->data->glob_cmd_reg, - ADIS_GLOB_CMD_SW_RESET); + ADIS_GLOB_CMD_SW_RESET); if (ret) { dev_err(&adis->spi->dev, 
"Failed to reset device: %d\n", ret); return ret; @@ -414,7 +416,7 @@ int __adis_initial_startup(struct adis *adis) { const struct adis_timeout *timeouts = adis->data->timeouts; struct gpio_desc *gpio; - uint16_t prod_id; + u16 prod_id; int ret; /* check if the device has rst pin low */ @@ -423,7 +425,7 @@ int __adis_initial_startup(struct adis *adis) return PTR_ERR(gpio); if (gpio) { - msleep(10); + usleep_range(10, 12); /* bring device out of reset */ gpiod_set_value_cansleep(gpio, 0); msleep(timeouts->reset_ms); @@ -477,7 +479,8 @@ EXPORT_SYMBOL_GPL(__adis_initial_startup); * a error bit in the channels raw value set error_mask to 0. */ int adis_single_conversion(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, unsigned int error_mask, int *val) + const struct iio_chan_spec *chan, + unsigned int error_mask, int *val) { struct adis *adis = iio_device_get_drvdata(indio_dev); unsigned int uval; @@ -486,7 +489,7 @@ int adis_single_conversion(struct iio_dev *indio_dev, mutex_lock(&adis->state_lock); ret = __adis_read_reg(adis, chan->address, &uval, - chan->scan_type.storagebits / 8); + chan->scan_type.storagebits / 8); if (ret) goto err_unlock; @@ -521,7 +524,7 @@ EXPORT_SYMBOL_GPL(adis_single_conversion); * called. */ int adis_init(struct adis *adis, struct iio_dev *indio_dev, - struct spi_device *spi, const struct adis_data *data) + struct spi_device *spi, const struct adis_data *data) { if (!data || !data->timeouts) { dev_err(&spi->dev, "No config data or timeouts not defined!\n"); diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c index 351c303c8a8c..d3527cf5ed37 100644 --- a/drivers/iio/imu/adis_buffer.c +++ b/drivers/iio/imu/adis_buffer.c @@ -20,7 +20,7 @@ #include static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, - const unsigned long *scan_mask) + const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); unsigned int burst_length, burst_max_length; @@ -67,7 +67,7 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, } int adis_update_scan_mode(struct iio_dev *indio_dev, - const unsigned long *scan_mask) + const unsigned long *scan_mask) { struct adis *adis = iio_device_get_drvdata(indio_dev); const struct iio_chan_spec *chan; @@ -158,7 +158,7 @@ static irqreturn_t adis_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, - pf->timestamp); + pf->timestamp); irq_done: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index c461bd1e8e69..0e7fb00ba241 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c @@ -15,8 +15,7 @@ #include #include -static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig, - bool state) +static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig, bool state) { struct adis *adis = iio_trigger_get_drvdata(trig); diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 11754f97d8bb..515ca09764fe 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -32,6 +32,7 @@ struct adis_timeout { u16 sw_reset_ms; u16 self_test_ms; }; + /** * struct adis_data - ADIS chip variant specific data * @read_delay: SPI delay for read operations in us @@ -45,7 +46,7 @@ struct adis_timeout { * @self_test_mask: Bitmask of supported self-test operations * @self_test_reg: Register address to request self test command * @self_test_no_autoclear: True if device's self-test needs clear of ctrl 
reg - * @status_error_msgs: Array of error messgaes + * @status_error_msgs: Array of error messages * @status_error_mask: Bitmask of errors supported by the device * @timeouts: Chip specific delays * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable @@ -130,12 +131,12 @@ struct adis { unsigned long irq_flag; void *buffer; - uint8_t tx[10] ____cacheline_aligned; - uint8_t rx[4]; + u8 tx[10] ____cacheline_aligned; + u8 rx[4]; }; int adis_init(struct adis *adis, struct iio_dev *indio_dev, - struct spi_device *spi, const struct adis_data *data); + struct spi_device *spi, const struct adis_data *data); int __adis_reset(struct adis *adis); /** @@ -156,9 +157,9 @@ static inline int adis_reset(struct adis *adis) } int __adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int val, unsigned int size); + unsigned int val, unsigned int size); int __adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size); + unsigned int *val, unsigned int size); /** * __adis_write_reg_8() - Write single byte to a register (unlocked) @@ -167,7 +168,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, * @value: The value to write */ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, - uint8_t val) + u8 val) { return __adis_write_reg(adis, reg, val, 1); } @@ -179,7 +180,7 @@ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, - uint16_t val) + u16 val) { return __adis_write_reg(adis, reg, val, 2); } @@ -191,7 +192,7 @@ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, - uint32_t val) + u32 val) { return __adis_write_reg(adis, reg, val, 4); } @@ -203,7 +204,7 @@ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, - uint16_t *val) + u16 *val) { unsigned int tmp; int ret; @@ -222,7 +223,7 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, - uint32_t *val) + u32 *val) { unsigned int tmp; int ret; @@ -242,7 +243,7 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg, * @size: The size of the @value (in bytes) */ static inline int adis_write_reg(struct adis *adis, unsigned int reg, - unsigned int val, unsigned int size) + unsigned int val, unsigned int size) { int ret; @@ -261,7 +262,7 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg, * @size: The size of the @val buffer */ static int adis_read_reg(struct adis *adis, unsigned int reg, - unsigned int *val, unsigned int size) + unsigned int *val, unsigned int size) { int ret; @@ -279,7 +280,7 @@ static int adis_read_reg(struct adis *adis, unsigned int reg, * @value: The value to write */ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, - uint8_t val) + u8 val) { return adis_write_reg(adis, reg, val, 1); } @@ -291,7 +292,7 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg, - uint16_t val) + u16 val) { return adis_write_reg(adis, reg, val, 2); 
} @@ -303,7 +304,7 @@ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg, * @value: Value to be written */ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, - uint32_t val) + u32 val) { return adis_write_reg(adis, reg, val, 4); } @@ -315,7 +316,7 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, - uint16_t *val) + u16 *val) { unsigned int tmp; int ret; @@ -334,7 +335,7 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg, * @val: The value read back from the device */ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, - uint32_t *val) + u32 *val) { unsigned int tmp; int ret; @@ -439,8 +440,8 @@ static inline void adis_dev_unlock(struct adis *adis) } int adis_single_conversion(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, unsigned int error_mask, - int *val); + const struct iio_chan_spec *chan, + unsigned int error_mask, int *val); #define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \ .type = IIO_VOLTAGE, \ @@ -489,7 +490,7 @@ int adis_single_conversion(struct iio_dev *indio_dev, .modified = 1, \ .channel2 = IIO_MOD_ ## mod, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ - info_sep, \ + (info_sep), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .info_mask_shared_by_all = info_all, \ .address = (addr), \ @@ -523,7 +524,7 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); int adis_update_scan_mode(struct iio_dev *indio_dev, - const unsigned long *scan_mask); + const unsigned long *scan_mask); #else /* CONFIG_IIO_BUFFER */ @@ -547,7 +548,8 @@ static inline int devm_adis_probe_trigger(struct adis *adis, #ifdef CONFIG_DEBUG_FS int adis_debugfs_reg_access(struct iio_dev *indio_dev, - unsigned int reg, unsigned int writeval, unsigned int *readval); + unsigned int reg, unsigned int writeval, + unsigned int *readval); #else -- cgit v1.2.3-71-gd317 From a37d9a17f099072fe4d3a9048b0321978707a918 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 20 Jan 2022 23:53:04 +0200 Subject: fsnotify: invalidate dcache before IN_DELETE event Apparently, there are some applications that use IN_DELETE event as an invalidation mechanism and expect that if they try to open a file with the name reported with the delete event, that it should not contain the content of the deleted file. Commit 49246466a989 ("fsnotify: move fsnotify_nameremove() hook out of d_delete()") moved the fsnotify delete hook before d_delete() so fsnotify will have access to a positive dentry. This allowed a race where opening the deleted file via cached dentry is now possible after receiving the IN_DELETE event. To fix the regression, create a new hook fsnotify_delete() that takes the unlinked inode as an argument and use a helper d_delete_notify() to pin the inode, so we can pass it to fsnotify_delete() after d_delete(). Backporting hint: this regression is from v5.3. Although patch will apply with only trivial conflicts to v5.4 and v5.10, it won't build, because fsnotify_delete() implementation is different in each of those versions (see fsnotify_link()). A follow up patch will fix the fsnotify_unlink/rmdir() calls in pseudo filesystem that do not need to call d_delete(). 
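The user-space pattern being restored looks roughly like this (a minimal sketch; the directory and file name are hypothetical, error handling is omitted, and <sys/inotify.h> and <fcntl.h> are assumed):

	int fd = inotify_init1(IN_CLOEXEC);

	inotify_add_watch(fd, "/some/dir", IN_DELETE);

	/* ... read(fd, ...) until an IN_DELETE event for "cache.bin" arrives ... */

	/* With this fix the dentry is already unhashed when the event is
	 * delivered, so a lookup can no longer return the deleted file.
	 */
	int datafd = open("/some/dir/cache.bin", O_RDONLY);	/* expected: -1, ENOENT */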
Link: https://lore.kernel.org/r/20220120215305.282577-1-amir73il@gmail.com Reported-by: Ivan Delalande Link: https://lore.kernel.org/linux-fsdevel/YeNyzoDM5hP5LtGW@visor/ Fixes: 49246466a989 ("fsnotify: move fsnotify_nameremove() hook out of d_delete()") Cc: stable@vger.kernel.org # v5.3+ Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/btrfs/ioctl.c | 6 ++---- fs/namei.c | 10 +++++----- include/linux/fsnotify.h | 49 ++++++++++++++++++++++++++++++++++++++++++------ 3 files changed, 50 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a5bd6926f7ff..7807b28b7892 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3086,10 +3086,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, btrfs_inode_lock(inode, 0); err = btrfs_delete_subvolume(dir, dentry); btrfs_inode_unlock(inode, 0); - if (!err) { - fsnotify_rmdir(dir, dentry); - d_delete(dentry); - } + if (!err) + d_delete_notify(dir, dentry); out_dput: dput(dentry); diff --git a/fs/namei.c b/fs/namei.c index d81f04f8d818..4ed0e41feab7 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3974,13 +3974,12 @@ int vfs_rmdir(struct user_namespace *mnt_userns, struct inode *dir, dentry->d_inode->i_flags |= S_DEAD; dont_mount(dentry); detach_mounts(dentry); - fsnotify_rmdir(dir, dentry); out: inode_unlock(dentry->d_inode); dput(dentry); if (!error) - d_delete(dentry); + d_delete_notify(dir, dentry); return error; } EXPORT_SYMBOL(vfs_rmdir); @@ -4102,7 +4101,6 @@ int vfs_unlink(struct user_namespace *mnt_userns, struct inode *dir, if (!error) { dont_mount(dentry); detach_mounts(dentry); - fsnotify_unlink(dir, dentry); } } } @@ -4110,9 +4108,11 @@ out: inode_unlock(target); /* We don't d_delete() NFS sillyrenamed files--they still exist. */ - if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { + if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) { + fsnotify_unlink(dir, dentry); + } else if (!error) { fsnotify_link_count(target); - d_delete(dentry); + d_delete_notify(dir, dentry); } return error; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 3a2d7dc3c607..bb8467cd11ae 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -224,6 +224,43 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, dir, &new_dentry->d_name, 0); } +/* + * fsnotify_delete - @dentry was unlinked and unhashed + * + * Caller must make sure that dentry->d_name is stable. + * + * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode + * as this may be called after d_delete() and old_dentry may be negative. + */ +static inline void fsnotify_delete(struct inode *dir, struct inode *inode, + struct dentry *dentry) +{ + __u32 mask = FS_DELETE; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name, + 0); +} + +/** + * d_delete_notify - delete a dentry and call fsnotify_delete() + * @dentry: The dentry to delete + * + * This helper is used to guaranty that the unlinked inode cannot be found + * by lookup of this name after fsnotify_delete() event has been delivered. 
+ */ +static inline void d_delete_notify(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = d_inode(dentry); + + ihold(inode); + d_delete(dentry); + fsnotify_delete(dir, inode, dentry); + iput(inode); +} + /* * fsnotify_unlink - 'name' was unlinked * @@ -231,10 +268,10 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, */ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) { - /* Expected to be called before d_delete() */ - WARN_ON_ONCE(d_is_negative(dentry)); + if (WARN_ON_ONCE(d_is_negative(dentry))) + return; - fsnotify_dirent(dir, dentry, FS_DELETE); + fsnotify_delete(dir, d_inode(dentry), dentry); } /* @@ -258,10 +295,10 @@ static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry) */ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) { - /* Expected to be called before d_delete() */ - WARN_ON_ONCE(d_is_negative(dentry)); + if (WARN_ON_ONCE(d_is_negative(dentry))) + return; - fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR); + fsnotify_delete(dir, d_inode(dentry), dentry); } /* -- cgit v1.2.3-71-gd317 From f1ba938e4f98941dc2b77795062e49444ec1fee1 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 19 Jan 2022 00:09:13 +0100 Subject: spi: s3c64xx: Delete unused boardfile helpers The helpers to use SPI host 1 and 2 are unused in the kernel and taking up space and maintenance hours. New systems should use device tree and not this, so delete the code. Cc: linux-samsung-soc@vger.kernel.org Cc: Krzysztof Kozlowski Cc: Sylwester Nawrocki Signed-off-by: Linus Walleij Reviewed-by: Sam Protsenko Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20220118230915.157797-1-linus.walleij@linaro.org Signed-off-by: Mark Brown --- arch/arm/mach-s3c/Kconfig | 12 ------ arch/arm/mach-s3c/devs.c | 72 ------------------------------- arch/arm/mach-s3c/setup-spi-s3c64xx.c | 9 ---- arch/arm/mach-s3c/spi-core-s3c24xx.h | 6 --- include/linux/platform_data/spi-s3c64xx.h | 8 ---- 5 files changed, 107 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-s3c/Kconfig b/arch/arm/mach-s3c/Kconfig index 25606e668cf9..1899fc3f44fd 100644 --- a/arch/arm/mach-s3c/Kconfig +++ b/arch/arm/mach-s3c/Kconfig @@ -191,18 +191,6 @@ config S3C64XX_DEV_SPI0 Compile in platform device definitions for S3C64XX's type SPI controller 0 -config S3C64XX_DEV_SPI1 - bool - help - Compile in platform device definitions for S3C64XX's type - SPI controller 1 - -config S3C64XX_DEV_SPI2 - bool - help - Compile in platform device definitions for S3C64XX's type - SPI controller 2 - config SAMSUNG_DEV_TS bool help diff --git a/arch/arm/mach-s3c/devs.c b/arch/arm/mach-s3c/devs.c index 06dec64848f9..9f086aee862b 100644 --- a/arch/arm/mach-s3c/devs.c +++ b/arch/arm/mach-s3c/devs.c @@ -1125,75 +1125,3 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); } #endif /* CONFIG_S3C64XX_DEV_SPI0 */ - -#ifdef CONFIG_S3C64XX_DEV_SPI1 -static struct resource s3c64xx_spi1_resource[] = { - [0] = DEFINE_RES_MEM(S3C_PA_SPI1, SZ_256), - [1] = DEFINE_RES_IRQ(IRQ_SPI1), -}; - -struct platform_device s3c64xx_device_spi1 = { - .name = "s3c6410-spi", - .id = 1, - .num_resources = ARRAY_SIZE(s3c64xx_spi1_resource), - .resource = s3c64xx_spi1_resource, - .dev = { - .dma_mask = &samsung_device_dma_mask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, -}; - -void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, - int num_cs) -{ - 
struct s3c64xx_spi_info pd; - - /* Reject invalid configuration */ - if (!num_cs || src_clk_nr < 0) { - pr_err("%s: Invalid SPI configuration\n", __func__); - return; - } - - pd.num_cs = num_cs; - pd.src_clk_nr = src_clk_nr; - pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio; - - s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1); -} -#endif /* CONFIG_S3C64XX_DEV_SPI1 */ - -#ifdef CONFIG_S3C64XX_DEV_SPI2 -static struct resource s3c64xx_spi2_resource[] = { - [0] = DEFINE_RES_MEM(S3C_PA_SPI2, SZ_256), - [1] = DEFINE_RES_IRQ(IRQ_SPI2), -}; - -struct platform_device s3c64xx_device_spi2 = { - .name = "s3c6410-spi", - .id = 2, - .num_resources = ARRAY_SIZE(s3c64xx_spi2_resource), - .resource = s3c64xx_spi2_resource, - .dev = { - .dma_mask = &samsung_device_dma_mask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, -}; - -void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, - int num_cs) -{ - struct s3c64xx_spi_info pd; - - /* Reject invalid configuration */ - if (!num_cs || src_clk_nr < 0) { - pr_err("%s: Invalid SPI configuration\n", __func__); - return; - } - - pd.num_cs = num_cs; - pd.src_clk_nr = src_clk_nr; - pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio; - - s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2); -} -#endif /* CONFIG_S3C64XX_DEV_SPI2 */ diff --git a/arch/arm/mach-s3c/setup-spi-s3c64xx.c b/arch/arm/mach-s3c/setup-spi-s3c64xx.c index efcf78d41585..497aff71c29c 100644 --- a/arch/arm/mach-s3c/setup-spi-s3c64xx.c +++ b/arch/arm/mach-s3c/setup-spi-s3c64xx.c @@ -16,12 +16,3 @@ int s3c64xx_spi0_cfg_gpio(void) return 0; } #endif - -#ifdef CONFIG_S3C64XX_DEV_SPI1 -int s3c64xx_spi1_cfg_gpio(void) -{ - s3c_gpio_cfgall_range(S3C64XX_GPC(4), 3, - S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); - return 0; -} -#endif diff --git a/arch/arm/mach-s3c/spi-core-s3c24xx.h b/arch/arm/mach-s3c/spi-core-s3c24xx.h index 057667469cc3..919c5fd0c9af 100644 --- a/arch/arm/mach-s3c/spi-core-s3c24xx.h +++ b/arch/arm/mach-s3c/spi-core-s3c24xx.h @@ -16,12 +16,6 @@ static inline void s3c24xx_spi_setname(char *name) #ifdef CONFIG_S3C64XX_DEV_SPI0 s3c64xx_device_spi0.name = name; #endif -#ifdef CONFIG_S3C64XX_DEV_SPI1 - s3c64xx_device_spi1.name = name; -#endif -#ifdef CONFIG_S3C64XX_DEV_SPI2 - s3c64xx_device_spi2.name = name; -#endif } #endif /* __PLAT_S3C_SPI_CORE_S3C24XX_H */ diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index 773daf7915a3..19d690f34670 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -52,17 +52,9 @@ struct s3c64xx_spi_info { */ extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, int num_cs); -extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, - int num_cs); -extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, - int num_cs); /* defined by architecture to configure gpio */ extern int s3c64xx_spi0_cfg_gpio(void); -extern int s3c64xx_spi1_cfg_gpio(void); -extern int s3c64xx_spi2_cfg_gpio(void); extern struct s3c64xx_spi_info s3c64xx_spi0_pdata; -extern struct s3c64xx_spi_info s3c64xx_spi1_pdata; -extern struct s3c64xx_spi_info s3c64xx_spi2_pdata; #endif /*__SPI_S3C64XX_H */ -- cgit v1.2.3-71-gd317 From 3b5529ae7f3578da633e8ae2ec0715a55a248f9f Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 19 Jan 2022 00:09:14 +0100 Subject: spi: s3c64xx: Drop custom gpio setup argument The SPI0 platform population function was taking a custom gpio setup callback but the only 
user passes NULL as the argument, so drop this argument. Cc: linux-samsung-soc@vger.kernel.org Cc: Krzysztof Kozlowski Cc: Sylwester Nawrocki Signed-off-by: Linus Walleij Reviewed-by: Krzysztof Kozlowski Reviewed-by: Sam Protsenko Link: https://lore.kernel.org/r/20220118230915.157797-2-linus.walleij@linaro.org Signed-off-by: Mark Brown --- arch/arm/mach-s3c/devs.c | 5 ++--- arch/arm/mach-s3c/mach-crag6410.c | 2 +- include/linux/platform_data/spi-s3c64xx.h | 4 +--- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-s3c/devs.c b/arch/arm/mach-s3c/devs.c index 9f086aee862b..1e266fc24f9b 100644 --- a/arch/arm/mach-s3c/devs.c +++ b/arch/arm/mach-s3c/devs.c @@ -1107,8 +1107,7 @@ struct platform_device s3c64xx_device_spi0 = { }, }; -void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, - int num_cs) +void __init s3c64xx_spi0_set_platdata(int src_clk_nr, int num_cs) { struct s3c64xx_spi_info pd; @@ -1120,7 +1119,7 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, pd.num_cs = num_cs; pd.src_clk_nr = src_clk_nr; - pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; + pd.cfg_gpio = s3c64xx_spi0_cfg_gpio; s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); } diff --git a/arch/arm/mach-s3c/mach-crag6410.c b/arch/arm/mach-s3c/mach-crag6410.c index 4a12c75d407f..41f0aba2d2fd 100644 --- a/arch/arm/mach-s3c/mach-crag6410.c +++ b/arch/arm/mach-s3c/mach-crag6410.c @@ -856,7 +856,7 @@ static void __init crag6410_machine_init(void) i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1)); samsung_keypad_set_platdata(&crag6410_keypad_data); - s3c64xx_spi0_set_platdata(NULL, 0, 2); + s3c64xx_spi0_set_platdata(0, 2); pwm_add_table(crag6410_pwm_lookup, ARRAY_SIZE(crag6410_pwm_lookup)); platform_add_devices(crag6410_devices, ARRAY_SIZE(crag6410_devices)); diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index 19d690f34670..10890a4b55b9 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -43,15 +43,13 @@ struct s3c64xx_spi_info { /** * s3c64xx_spi_set_platdata - SPI Controller configure callback by the board * initialization code. - * @cfg_gpio: Pointer to gpio setup function. * @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks. * @num_cs: Number of elements in the 'cs' array. * * Call this from machine init code for each SPI Controller that * has some chips attached to it. */ -extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, - int num_cs); +extern void s3c64xx_spi0_set_platdata(int src_clk_nr, int num_cs); /* defined by architecture to configure gpio */ extern int s3c64xx_spi0_cfg_gpio(void); -- cgit v1.2.3-71-gd317 From a45cf3cc72dd9cfde9db8af32cdf9c431f53f9bc Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 19 Jan 2022 00:09:15 +0100 Subject: spi: s3c64xx: Convert to use GPIO descriptors Convert the S3C64xx SPI host to use GPIO descriptors. Provide GPIO descriptor tables for the one user with CS 0 and 1.
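The board-file pattern this expects of remaining non-DT users is the descriptor table added in the crag6410 hunk below; sketched here for a hypothetical board (device tree users instead describe the lines with a cs-gpios property in the controller node, which the SPI core parses once use_gpio_descriptors is set):

	/* Hypothetical board file: "cs" index N maps to chip select N
	 * on the host named "s3c6410-spi" with instance id 0.
	 */
	static struct gpiod_lookup_table myboard_spi0_cs_table = {
		.dev_id = "s3c6410-spi.0",
		.table = {
			GPIO_LOOKUP_IDX("GPIOC", 3, "cs", 0, GPIO_ACTIVE_LOW),
			{ },
		},
	};

	gpiod_add_lookup_table(&myboard_spi0_cs_table);	/* before device registration */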
Cc: linux-samsung-soc@vger.kernel.org Cc: Sylwester Nawrocki Reviewed-by: Krzysztof Kozlowski Reviewed-by: Sam Protsenko Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20220118230915.157797-3-linus.walleij@linaro.org Signed-off-by: Mark Brown --- arch/arm/mach-s3c/mach-crag6410-module.c | 13 -------- arch/arm/mach-s3c/mach-crag6410.c | 11 +++++++ drivers/spi/spi-s3c64xx.c | 53 ++++++++----------------------- include/linux/platform_data/spi-s3c64xx.h | 2 -- 4 files changed, 24 insertions(+), 55 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-s3c/mach-crag6410-module.c b/arch/arm/mach-s3c/mach-crag6410-module.c index 407ad493493e..5d1d4b67a4b7 100644 --- a/arch/arm/mach-s3c/mach-crag6410-module.c +++ b/arch/arm/mach-s3c/mach-crag6410-module.c @@ -32,10 +32,6 @@ #include "crag6410.h" -static struct s3c64xx_spi_csinfo wm0010_spi_csinfo = { - .line = S3C64XX_GPC(3), -}; - static struct wm0010_pdata wm0010_pdata = { .gpio_reset = S3C64XX_GPN(6), .reset_active_high = 1, /* Active high for Glenfarclas Rev 2 */ @@ -49,7 +45,6 @@ static struct spi_board_info wm1253_devs[] = { .chip_select = 0, .mode = SPI_MODE_0, .irq = S3C_EINT(4), - .controller_data = &wm0010_spi_csinfo, .platform_data = &wm0010_pdata, }, }; @@ -62,7 +57,6 @@ static struct spi_board_info balblair_devs[] = { .chip_select = 0, .mode = SPI_MODE_0, .irq = S3C_EINT(4), - .controller_data = &wm0010_spi_csinfo, .platform_data = &wm0010_pdata, }, }; @@ -229,10 +223,6 @@ static struct arizona_pdata wm5102_reva_pdata = { }, }; -static struct s3c64xx_spi_csinfo codec_spi_csinfo = { - .line = S3C64XX_GPN(5), -}; - static struct spi_board_info wm5102_reva_spi_devs[] = { [0] = { .modalias = "wm5102", @@ -242,7 +232,6 @@ static struct spi_board_info wm5102_reva_spi_devs[] = { .mode = SPI_MODE_0, .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2, - .controller_data = &codec_spi_csinfo, .platform_data = &wm5102_reva_pdata, }, }; @@ -275,7 +264,6 @@ static struct spi_board_info wm5102_spi_devs[] = { .mode = SPI_MODE_0, .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2, - .controller_data = &codec_spi_csinfo, .platform_data = &wm5102_pdata, }, }; @@ -298,7 +286,6 @@ static struct spi_board_info wm5110_spi_devs[] = { .mode = SPI_MODE_0, .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2, - .controller_data = &codec_spi_csinfo, .platform_data = &wm5102_reva_pdata, }, }; diff --git a/arch/arm/mach-s3c/mach-crag6410.c b/arch/arm/mach-s3c/mach-crag6410.c index 41f0aba2d2fd..e3e0fe897bcc 100644 --- a/arch/arm/mach-s3c/mach-crag6410.c +++ b/arch/arm/mach-s3c/mach-crag6410.c @@ -825,6 +825,15 @@ static const struct gpio_led_platform_data gpio_leds_pdata = { static struct dwc2_hsotg_plat crag6410_hsotg_pdata; +static struct gpiod_lookup_table crag_spi0_gpiod_table = { + .dev_id = "s3c6410-spi.0", + .table = { + GPIO_LOOKUP_IDX("GPIOC", 3, "cs", 0, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("GPION", 5, "cs", 1, GPIO_ACTIVE_LOW), + { }, + }, +}; + static void __init crag6410_machine_init(void) { /* Open drain IRQs need pullups */ @@ -856,6 +865,8 @@ static void __init crag6410_machine_init(void) i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1)); samsung_keypad_set_platdata(&crag6410_keypad_data); + + gpiod_add_lookup_table(&crag_spi0_gpiod_table); s3c64xx_spi0_set_platdata(0, 2); pwm_add_table(crag6410_pwm_lookup, ARRAY_SIZE(crag6410_pwm_lookup)); diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 8755cd85e83c..3e42cdb19d27 100644 --- a/drivers/spi/spi-s3c64xx.c +++ 
b/drivers/spi/spi-s3c64xx.c @@ -13,10 +13,8 @@ #include #include #include -#include #include #include -#include #include @@ -656,7 +654,11 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master, struct s3c64xx_spi_csinfo *cs = spi->controller_data; /* Configure feedback delay */ - writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK); + if (!cs) + /* No delay if not defined */ + writel(0, sdd->regs + S3C64XX_SPI_FB_CLK); + else + writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK); return 0; } @@ -830,34 +832,16 @@ static int s3c64xx_spi_setup(struct spi_device *spi) if (spi->dev.of_node) { cs = s3c64xx_get_slave_ctrldata(spi); spi->controller_data = cs; - } else if (cs) { - /* On non-DT platforms the SPI core will set spi->cs_gpio - * to -ENOENT. The GPIO pin used to drive the chip select - * is defined by using platform data so spi->cs_gpio value - * has to be override to have the proper GPIO pin number. - */ - spi->cs_gpio = cs->line; } - if (IS_ERR_OR_NULL(cs)) { + /* NULL is fine, we just avoid using the FB delay (=0) */ + if (IS_ERR(cs)) { dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select); return -ENODEV; } - if (!spi_get_ctldata(spi)) { - if (gpio_is_valid(spi->cs_gpio)) { - err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH, - dev_name(&spi->dev)); - if (err) { - dev_err(&spi->dev, - "Failed to get /CS gpio [%d]: %d\n", - spi->cs_gpio, err); - goto err_gpio_req; - } - } - + if (!spi_get_ctldata(spi)) spi_set_ctldata(spi, cs); - } pm_runtime_get_sync(&sdd->pdev->dev); @@ -909,11 +893,9 @@ setup_exit: /* setup() returns with device de-selected */ s3c64xx_spi_set_cs(spi, false); - if (gpio_is_valid(spi->cs_gpio)) - gpio_free(spi->cs_gpio); spi_set_ctldata(spi, NULL); -err_gpio_req: + /* This was dynamically allocated on the DT path */ if (spi->dev.of_node) kfree(cs); @@ -924,19 +906,9 @@ static void s3c64xx_spi_cleanup(struct spi_device *spi) { struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi); - if (gpio_is_valid(spi->cs_gpio)) { - gpio_free(spi->cs_gpio); - if (spi->dev.of_node) - kfree(cs); - else { - /* On non-DT platforms, the SPI core sets - * spi->cs_gpio to -ENOENT and .setup() - * overrides it with the GPIO pin value - * passed using platform data. - */ - spi->cs_gpio = -ENOENT; - } - } + /* This was dynamically allocated on the DT path */ + if (spi->dev.of_node) + kfree(cs); spi_set_ctldata(spi, NULL); } @@ -1131,6 +1103,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) master->prepare_message = s3c64xx_spi_prepare_message; master->transfer_one = s3c64xx_spi_transfer_one; master->num_chipselect = sci->num_cs; + master->use_gpio_descriptors = true; master->dma_alignment = 8; master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index 10890a4b55b9..5df1ace6d2c9 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -16,7 +16,6 @@ struct platform_device; * struct s3c64xx_spi_csinfo - ChipSelect description * @fb_delay: Slave specific feedback delay. * Refer to FB_CLK_SEL register definition in SPI chapter. - * @line: Custom 'identity' of the CS line. * * This is per SPI-Slave Chipselect information. 
* Allocate and initialize one in machine init code and make the @@ -24,7 +23,6 @@ struct platform_device; */ struct s3c64xx_spi_csinfo { u8 fb_delay; - unsigned line; }; /** -- cgit v1.2.3-71-gd317 From 7f2a3cf4e6077a1525092f114be7819e505773a1 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 19 Jan 2022 01:09:14 +0100 Subject: spi: s3c24xx: Convert to GPIO descriptors This driver has a bunch of custom old-style GPIO number-passing fields and a custom set-up callback. The good thing is: nothing in the kernel is using it. Convert the driver to use GPIO descriptors with a SPI_MASTER_GPIO_SS flag so that the local CS callback also gets invoked as the hardware needs this. New users of this driver can provide GPIO descriptor tables like the other converted drivers. Cc: linux-samsung-soc@vger.kernel.org Cc: Krzysztof Kozlowski Cc: Sylwester Nawrocki Signed-off-by: Linus Walleij Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20220119000914.192553-1-linus.walleij@linaro.org Signed-off-by: Mark Brown --- drivers/spi/spi-s3c24xx.c | 47 +++------------------------------------------ include/linux/spi/s3c24xx.h | 5 ----- 2 files changed, 3 insertions(+), 49 deletions(-) (limited to 'include/linux') diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c index d6f51695ca5b..660aa866af06 100644 --- a/drivers/spi/spi-s3c24xx.c +++ b/drivers/spi/spi-s3c24xx.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include @@ -62,9 +61,6 @@ struct s3c24xx_spi { unsigned char fiq_inuse; unsigned char fiq_claimed; - void (*set_cs)(struct s3c2410_spi_info *spi, - int cs, int pol); - /* data buffers */ const unsigned char *tx; unsigned char *rx; @@ -84,29 +80,21 @@ static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev) return spi_master_get_devdata(sdev->master); } -static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol) -{ - gpio_set_value(spi->pin_cs, pol); -} - static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) { struct s3c24xx_spi_devstate *cs = spi->controller_state; struct s3c24xx_spi *hw = to_hw(spi); - unsigned int cspol = spi->mode & SPI_CS_HIGH ?
1 : 0; /* change the chipselect state and the state of the spi engine clock */ switch (value) { case BITBANG_CS_INACTIVE: - hw->set_cs(hw->pdata, spi->chip_select, cspol^1); writeb(cs->spcon, hw->regs + S3C2410_SPCON); break; case BITBANG_CS_ACTIVE: writeb(cs->spcon | S3C2410_SPCON_ENSCK, hw->regs + S3C2410_SPCON); - hw->set_cs(hw->pdata, spi->chip_select, cspol); break; } } @@ -452,14 +440,6 @@ static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw) writeb(0xff, hw->regs + S3C2410_SPPRE); writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN); writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON); - - if (hw->pdata) { - if (hw->set_cs == s3c24xx_spi_gpiocs) - gpio_direction_output(hw->pdata->pin_cs, 1); - - if (hw->pdata->gpio_setup) - hw->pdata->gpio_setup(hw->pdata, 1); - } } static int s3c24xx_spi_probe(struct platform_device *pdev) @@ -502,6 +482,9 @@ static int s3c24xx_spi_probe(struct platform_device *pdev) master->num_chipselect = hw->pdata->num_cs; master->bus_num = pdata->bus_num; master->bits_per_word_mask = SPI_BPW_MASK(8); + /* we need to call the local chipselect callback */ + master->flags = SPI_MASTER_GPIO_SS; + master->use_gpio_descriptors = true; /* setup the state for the bitbang driver */ @@ -541,27 +524,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev) goto err_no_pdata; } - /* setup any gpio we can */ - - if (!pdata->set_cs) { - if (pdata->pin_cs < 0) { - dev_err(&pdev->dev, "No chipselect pin\n"); - err = -EINVAL; - goto err_register; - } - - err = devm_gpio_request(&pdev->dev, pdata->pin_cs, - dev_name(&pdev->dev)); - if (err) { - dev_err(&pdev->dev, "Failed to get gpio for cs\n"); - goto err_register; - } - - hw->set_cs = s3c24xx_spi_gpiocs; - gpio_direction_output(pdata->pin_cs, 1); - } else - hw->set_cs = pdata->set_cs; - s3c24xx_spi_initialsetup(hw); /* register our spi controller */ @@ -604,9 +566,6 @@ static int s3c24xx_spi_suspend(struct device *dev) if (ret) return ret; - if (hw->pdata && hw->pdata->gpio_setup) - hw->pdata->gpio_setup(hw->pdata, 0); - clk_disable(hw->clk); return 0; } diff --git a/include/linux/spi/s3c24xx.h b/include/linux/spi/s3c24xx.h index 440a71593162..9b8bb22d5b0c 100644 --- a/include/linux/spi/s3c24xx.h +++ b/include/linux/spi/s3c24xx.h @@ -10,14 +10,9 @@ #define __LINUX_SPI_S3C24XX_H __FILE__ struct s3c2410_spi_info { - int pin_cs; /* simple gpio cs */ unsigned int num_cs; /* total chipselects */ int bus_num; /* bus number to use. */ - unsigned int use_fiq:1; /* use fiq */ - - void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable); - void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); }; extern int s3c24xx_set_fiq(unsigned int irq, u32 *ack_ptr, bool on); -- cgit v1.2.3-71-gd317 From 9daf0a4d32d60a57f2a2533bdf4c178be7fdff7f Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sun, 16 Jan 2022 04:59:36 -0800 Subject: quota: cleanup double word in comment Remove the second 'handle'. Link: https://lore.kernel.org/r/20220116125936.389767-1-trix@redhat.com Signed-off-by: Tom Rix Signed-off-by: Jan Kara --- include/linux/quota.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/quota.h b/include/linux/quota.h index 18ebd39c9487..fd692b4a41d5 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -91,7 +91,7 @@ extern bool qid_valid(struct kqid qid); * * When there is no mapping defined for the user-namespace, type, * qid tuple an invalid kqid is returned. Callers are expected to - * test for and handle handle invalid kqids being returned. 
+ * test for and handle invalid kqids being returned. * Invalid kqids may be tested for using qid_valid(). */ static inline struct kqid make_kqid(struct user_namespace *from, -- cgit v1.2.3-71-gd317 From 376040e47334c6dc6a939a32197acceb00fe4acf Mon Sep 17 00:00:00 2001 From: Kenny Yu Date: Mon, 24 Jan 2022 10:54:01 -0800 Subject: bpf: Add bpf_copy_from_user_task() helper This adds a helper for bpf programs to read the memory of other tasks. As an example use case at Meta, we are using a bpf task iterator program and this new helper to print C++ async stack traces for all threads of a given process. Signed-off-by: Kenny Yu Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20220124185403.468466-3-kennyyu@fb.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 11 +++++++++++ kernel/bpf/helpers.c | 34 ++++++++++++++++++++++++++++++++++ kernel/trace/bpf_trace.c | 2 ++ tools/include/uapi/linux/bpf.h | 11 +++++++++++ 5 files changed, 59 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8c92c974bd12..394305a5e02f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2243,6 +2243,7 @@ extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto; extern const struct bpf_func_proto bpf_find_vma_proto; extern const struct bpf_func_proto bpf_loop_proto; extern const struct bpf_func_proto bpf_strncmp_proto; +extern const struct bpf_func_proto bpf_copy_from_user_task_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 16a7574292a5..4a2f7041ebae 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5076,6 +5076,16 @@ union bpf_attr { * associated to *xdp_md*, at *offset*. * Return * 0 on success, or a negative error in case of failure. + * + * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags) + * Description + * Read *size* bytes from user space address *user_ptr* in *tsk*'s + * address space, and stores the data in *dst*. *flags* is not + * used yet and is provided for future extensibility. This helper + * can only be used by sleepable programs. + * Return + * 0 on success, or a negative error in case of failure. On error + * *dst* buffer is zeroed out. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5269,6 +5279,7 @@ union bpf_attr { FN(xdp_get_buff_len), \ FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ + FN(copy_from_user_task), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 01cfdf40c838..ed2780b76cc1 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "../../lib/kstrtox.h" @@ -671,6 +672,39 @@ const struct bpf_func_proto bpf_copy_from_user_proto = { .arg3_type = ARG_ANYTHING, }; +BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size, + const void __user *, user_ptr, struct task_struct *, tsk, u64, flags) +{ + int ret; + + /* flags is not used yet */ + if (unlikely(flags)) + return -EINVAL; + + if (unlikely(!size)) + return 0; + + ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0); + if (ret == size) + return 0; + + memset(dst, 0, size); + /* Return -EFAULT for partial read */ + return ret < 0 ? 
ret : -EFAULT; } + +const struct bpf_func_proto bpf_copy_from_user_task_proto = { + .func = bpf_copy_from_user_task, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_BTF_ID, + .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], + .arg5_type = ARG_ANYTHING +}; + BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) { if (cpu >= nr_cpu_ids) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 06a9e220069e..a2024ba32a20 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1235,6 +1235,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_task_stack_proto; case BPF_FUNC_copy_from_user: return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL; + case BPF_FUNC_copy_from_user_task: + return prog->aux->sleepable ? &bpf_copy_from_user_task_proto : NULL; case BPF_FUNC_snprintf_btf: return &bpf_snprintf_btf_proto; case BPF_FUNC_per_cpu_ptr: diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 16a7574292a5..4a2f7041ebae 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5076,6 +5076,16 @@ union bpf_attr { * associated to *xdp_md*, at *offset*. * Return * 0 on success, or a negative error in case of failure. + * + * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags) + * Description + * Read *size* bytes from user space address *user_ptr* in *tsk*'s + * address space, and stores the data in *dst*. *flags* is not + * used yet and is provided for future extensibility. This helper + * can only be used by sleepable programs. + * Return + * 0 on success, or a negative error in case of failure. On error + * *dst* buffer is zeroed out. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5269,6 +5279,7 @@ union bpf_attr { FN(xdp_get_buff_len), \ FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ + FN(copy_from_user_task), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-71-gd317 From 945c37ed564770c78dfe6b9f08bed57a1b4e60ef Mon Sep 17 00:00:00 2001 From: Linyu Yuan Date: Mon, 10 Jan 2022 20:43:28 +0800 Subject: usb: roles: fix include/linux/usb/role.h compile issue When CONFIG_USB_ROLE_SWITCH is not defined, add a usb_role_switch_find_by_fwnode() definition which returns NULL.
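With the stub in place, a consumer can perform the lookup unconditionally and only has to handle a NULL result. A hypothetical caller sketch (the fwnode and dev come from the consumer's own context, not from this patch):

	struct usb_role_switch *sw;

	sw = usb_role_switch_find_by_fwnode(fwnode);
	if (!sw) {
		dev_dbg(dev, "no role switch available\n");
	} else {
		usb_role_switch_set_role(sw, USB_ROLE_HOST);
		usb_role_switch_put(sw);
	}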
Fixes: c6919d5e0cd1 ("usb: roles: Add usb_role_switch_find_by_fwnode()") Signed-off-by: Linyu Yuan Link: https://lore.kernel.org/r/1641818608-25039-1-git-send-email-quic_linyyuan@quicinc.com Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/role.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h index 031f148ab373..b5deafd91f67 100644 --- a/include/linux/usb/role.h +++ b/include/linux/usb/role.h @@ -91,6 +91,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node) static inline void usb_role_switch_put(struct usb_role_switch *sw) { } +static inline struct usb_role_switch * +usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode) +{ + return NULL; +} + static inline struct usb_role_switch * usb_role_switch_register(struct device *parent, const struct usb_role_switch_desc *desc) -- cgit v1.2.3-71-gd317 From 33569ef3c754a82010f266b7b938a66a3ccf90a4 Mon Sep 17 00:00:00 2001 From: Amadeusz Sławiński Date: Wed, 19 Jan 2022 11:47:51 +0100 Subject: PM: hibernate: Remove register_nosave_region_late() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is an unused wrapper forcing kmalloc allocation for registering nosave regions. Also, rename __register_nosave_region() to register_nosave_region() now that there is no need for disambiguation. Signed-off-by: Amadeusz Sławiński Reviewed-by: Cezary Rojewski Signed-off-by: Rafael J. Wysocki --- include/linux/suspend.h | 11 +---------- kernel/power/snapshot.c | 21 +++++++-------------- 2 files changed, 8 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5785d909c321..3e8ecdebe601 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -430,15 +430,7 @@ struct platform_hibernation_ops { #ifdef CONFIG_HIBERNATION /* kernel/power/snapshot.c */ -extern void __register_nosave_region(unsigned long b, unsigned long e, int km); -static inline void __init register_nosave_region(unsigned long b, unsigned long e) -{ - __register_nosave_region(b, e, 0); -} -static inline void __init register_nosave_region_late(unsigned long b, unsigned long e) -{ - __register_nosave_region(b, e, 1); -} +extern void register_nosave_region(unsigned long b, unsigned long e); extern int swsusp_page_is_forbidden(struct page *); extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); @@ -458,7 +450,6 @@ int pfn_is_nosave(unsigned long pfn); int hibernate_quiet_exec(int (*func)(void *data), void *data); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} -static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} static inline void swsusp_unset_page_free(struct page *p) {} diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index f7a986078213..330d49937692 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -978,8 +978,7 @@ static void memory_bm_recycle(struct memory_bitmap *bm) * Register a range of page frames the contents of which should not be saved * during hibernation (to be used in the early initialization code). 
*/ -void __init __register_nosave_region(unsigned long start_pfn, - unsigned long end_pfn, int use_kmalloc) +void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn) { struct nosave_region *region; @@ -995,18 +994,12 @@ void __init __register_nosave_region(unsigned long start_pfn, goto Report; } } - if (use_kmalloc) { - /* During init, this shouldn't fail */ - region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL); - BUG_ON(!region); - } else { - /* This allocation cannot fail */ - region = memblock_alloc(sizeof(struct nosave_region), - SMP_CACHE_BYTES); - if (!region) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct nosave_region)); - } + /* This allocation cannot fail */ + region = memblock_alloc(sizeof(struct nosave_region), + SMP_CACHE_BYTES); + if (!region) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(struct nosave_region)); region->start_pfn = start_pfn; region->end_pfn = end_pfn; list_add_tail(®ion->list, &nosave_regions); -- cgit v1.2.3-71-gd317 From 1dc01abad6544cb9d884071b626b706e37aa9601 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Thu, 13 Jan 2022 16:53:57 +0100 Subject: cpumask: Always inline helpers which use bit manipulation functions Former are always inlined so do that for the latter too, for consistency. Size impact is a whopping 5 bytes increase! :-) text data bss dec hex filename 22350551 8213184 1917164 32480899 1ef9e83 vmlinux.x86-64.defconfig.before 22350556 8213152 1917164 32480872 1ef9e68 vmlinux.x86-64.defconfig.after Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Acked-by: Marco Elver Link: https://lore.kernel.org/r/20220113155357.4706-3-bp@alien8.de --- include/linux/cpumask.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 64dae70d31f5..6b06c698cd2a 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -341,12 +341,12 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool * @cpu: cpu number (< nr_cpu_ids) * @dstp: the cpumask pointer */ -static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) { set_bit(cpumask_check(cpu), cpumask_bits(dstp)); } -static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) { __set_bit(cpumask_check(cpu), cpumask_bits(dstp)); } @@ -357,12 +357,12 @@ static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) * @cpu: cpu number (< nr_cpu_ids) * @dstp: the cpumask pointer */ -static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) +static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) { clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); } -static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) +static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) { __clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); } @@ -374,7 +374,7 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) * * Returns 1 if @cpu is set in @cpumask, else returns 0 */ -static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) +static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) { return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); } @@ 
-388,7 +388,7 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) * * test_and_set_bit wrapper for cpumasks. */ -static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) +static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) { return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); } @@ -402,7 +402,7 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) * * test_and_clear_bit wrapper for cpumasks. */ -static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) +static __always_inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) { return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); } -- cgit v1.2.3-71-gd317 From be6ec5b7026620b931e0fa9287d24ad2cd2ab9b6 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 26 Jan 2022 10:25:55 +0000 Subject: net: xpcs: add support for retrieving supported interface modes Add a function to the xpcs driver to retrieve the supported PHY interface modes, which can be used by drivers to fill in phylink's supported_interfaces mask. We validate the interface bit index to ensure that it fits within the bitmap as xpcs lists PHY_INTERFACE_MODE_MAX in an entry. Tested-by: Wong Vee Khee # Intel EHL Signed-off-by: Russell King (Oracle) Signed-off-by: David S. Miller --- drivers/net/pcs/pcs-xpcs.c | 14 ++++++++++++++ include/linux/pcs/pcs-xpcs.h | 1 + 2 files changed, 15 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index cd6742e6ba8b..f45821524fab 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -662,6 +662,20 @@ void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported, } EXPORT_SYMBOL_GPL(xpcs_validate); +void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces) +{ + int i, j; + + for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) { + const struct xpcs_compat *compat = &xpcs->id->compat[i]; + + for (j = 0; j < compat->num_interfaces; j++) + if (compat->interface[j] < PHY_INTERFACE_MODE_MAX) + __set_bit(compat->interface[j], interfaces); + } +} +EXPORT_SYMBOL_GPL(xpcs_get_interfaces); + int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable) { int ret; diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index add077a81b21..3126a4924d92 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -33,6 +33,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, unsigned int mode); void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported, struct phylink_link_state *state); +void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, -- cgit v1.2.3-71-gd317 From fe70fb74b56407c1a5be347258082f8abbe7956d Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 26 Jan 2022 10:26:10 +0000 Subject: net: stmmac/xpcs: convert to pcs_validate() stmmac explicitly calls the xpcs driver to validate the ethtool linkmodes. This is no longer necessary as phylink now supports validation through a PCS method. Convert both drivers to use this new mechanism. Tested-by: Wong Vee Khee # Intel EHL Signed-off-by: Russell King (Oracle) Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 ----- drivers/net/pcs/pcs-xpcs.c | 27 +++++++++-------------- include/linux/pcs/pcs-xpcs.h | 2 -- 3 files changed, 11 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index bd20920daf7b..029f21b9d452 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -940,7 +940,6 @@ static void stmmac_validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state) { - struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, }; /* This is very similar to phylink_generic_validate() except that @@ -958,10 +957,6 @@ static void stmmac_validate(struct phylink_config *config, linkmode_and(supported, supported, mac_supported); linkmode_and(state->advertising, state->advertising, mac_supported); - - /* If PCS is supported, check which modes it supports. */ - if (priv->hw->xpcs) - xpcs_validate(priv->hw->xpcs, supported, state); } static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index f45821524fab..61418d4dc0cd 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -632,35 +632,29 @@ static void xpcs_resolve_pma(struct dw_xpcs *xpcs, } } -void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported, - struct phylink_link_state *state) +static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported, + const struct phylink_link_state *state) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported) = { 0, }; const struct xpcs_compat *compat; + struct dw_xpcs *xpcs; int i; - /* phylink expects us to report all supported modes with - * PHY_INTERFACE_MODE_NA, just don't limit the supported and - * advertising masks and exit. - */ - if (state->interface == PHY_INTERFACE_MODE_NA) - return; - - linkmode_zero(xpcs_supported); - + xpcs = phylink_pcs_to_xpcs(pcs); compat = xpcs_find_compat(xpcs->id, state->interface); - /* Populate the supported link modes for this - * PHY interface type + /* Populate the supported link modes for this PHY interface type. + * FIXME: what about the port modes and autoneg bit? This masks + * all those away. 
*/ if (compat) for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++) set_bit(compat->supported[i], xpcs_supported); linkmode_and(supported, supported, xpcs_supported); - linkmode_and(state->advertising, state->advertising, xpcs_supported); + + return 0; } -EXPORT_SYMBOL_GPL(xpcs_validate); void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces) { @@ -1120,6 +1114,7 @@ static const struct xpcs_id xpcs_id_list[] = { }; static const struct phylink_pcs_ops xpcs_phylink_ops = { + .pcs_validate = xpcs_validate, .pcs_config = xpcs_config, .pcs_get_state = xpcs_get_state, .pcs_link_up = xpcs_link_up, diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index 3126a4924d92..266eb26fb029 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -31,8 +31,6 @@ void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, int speed, int duplex); int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, unsigned int mode); -void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported, - struct phylink_link_state *state); void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); -- cgit v1.2.3-71-gd317 From 4e2a44c1408b6a6a46122704511234f68cf012b8 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 24 Jan 2022 08:14:22 +0100 Subject: tty: add kfifo to tty_port Define a kfifo inside struct tty_port. We use DECLARE_KFIFO_PTR and let the preexisting tty_port::xmit_buf be also the buffer for the kfifo. And handle the initialization/decommissioning along with xmit_buf, i.e. in tty_port_alloc_xmit_buf() and tty_port_free_xmit_buf(), respectively. This allows for kfifo use in drivers which opt in, while others still may use the old xmit_buf. mxser will be the first user in the next few patches.
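A driver that opts in can then treat the page behind xmit_buf as a ring buffer through the generic kfifo API instead of maintaining its own head/tail indices. A rough TX-path sketch, where hw_tx_ready() and hw_write_byte() are hypothetical stand-ins for the device-specific register accessors:

	/* write path: queue bytes handed down from the tty layer */
	count = kfifo_in(&port->xmit_fifo, buf, count);

	/* TX interrupt path: drain the fifo into the hardware */
	unsigned char ch;

	while (hw_tx_ready() && kfifo_get(&port->xmit_fifo, &ch))
		hw_write_byte(ch);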
Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20220124071430.14907-4-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/tty_port.c | 6 +++++- include/linux/tty_port.h | 3 +++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 7709ce655f44..7644834640f1 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -225,8 +225,11 @@ int tty_port_alloc_xmit_buf(struct tty_port *port) { /* We may sleep in get_zeroed_page() */ mutex_lock(&port->buf_mutex); - if (port->xmit_buf == NULL) + if (port->xmit_buf == NULL) { port->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL); + if (port->xmit_buf) + kfifo_init(&port->xmit_fifo, port->xmit_buf, PAGE_SIZE); + } mutex_unlock(&port->buf_mutex); if (port->xmit_buf == NULL) return -ENOMEM; @@ -240,6 +243,7 @@ void tty_port_free_xmit_buf(struct tty_port *port) if (port->xmit_buf != NULL) { free_page((unsigned long)port->xmit_buf); port->xmit_buf = NULL; + INIT_KFIFO(port->xmit_fifo); } mutex_unlock(&port->buf_mutex); } diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h index d3ea9ed0b98e..58e9619116b7 100644 --- a/include/linux/tty_port.h +++ b/include/linux/tty_port.h @@ -2,6 +2,7 @@ #ifndef _LINUX_TTY_PORT_H #define _LINUX_TTY_PORT_H +#include #include #include #include @@ -67,6 +68,7 @@ extern const struct tty_port_client_operations tty_port_default_client_ops; * @mutex: locking, for open, shutdown and other port operations * @buf_mutex: @xmit_buf alloc lock * @xmit_buf: optional xmit buffer used by some drivers * @xmit_fifo: optional xmit buffer used by some drivers * @close_delay: delay in jiffies to wait when closing the port * @closing_wait: delay in jiffies for output to be sent before closing * @drain_delay: set to zero if no pure time based drain is needed else set to @@ -110,6 +112,7 @@ struct tty_port { struct mutex mutex; struct mutex buf_mutex; unsigned char *xmit_buf; + DECLARE_KFIFO_PTR(xmit_fifo, unsigned char); unsigned int close_delay; unsigned int closing_wait; int drain_delay; -- cgit v1.2.3-71-gd317 From ebb7fb1557b1d03b906b668aa2164b51e6b7d19a Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 26 Jan 2022 09:19:20 -0800 Subject: xfs, iomap: limit individual ioend chain lengths in writeback Trond Myklebust reported soft lockups in XFS IO completion such as this: watchdog: BUG: soft lockup - CPU#12 stuck for 23s! [kworker/12:1:3106] CPU: 12 PID: 3106 Comm: kworker/12:1 Not tainted 4.18.0-305.10.2.el8_4.x86_64 #1 Workqueue: xfs-conv/md127 xfs_end_io [xfs] RIP: 0010:_raw_spin_unlock_irqrestore+0x11/0x20 Call Trace: wake_up_page_bit+0x8a/0x110 iomap_finish_ioend+0xd7/0x1c0 iomap_finish_ioends+0x7f/0xb0 xfs_end_ioend+0x6b/0x100 [xfs] xfs_end_io+0xb9/0xe0 [xfs] process_one_work+0x1a7/0x360 worker_thread+0x1fa/0x390 kthread+0x116/0x130 ret_from_fork+0x35/0x40 Ioends are processed as an atomic completion unit when all the chained bios in the ioend have completed their IO. Logically contiguous ioends can also be merged and completed as a single, larger unit. Both of these things can be problematic as both the bio chains per ioend and the size of the merged ioends processed as a single completion are unbound. If we have a large sequential dirty region in the page cache, write_cache_pages() will keep feeding us sequential pages and we will keep mapping them into ioends and bios until we get a dirty page at a non-sequential file offset.
These large sequential runs will result in bio and ioend chaining to optimise the io patterns. The pages under writeback are pinned within these chains until the submission chaining is broken, allowing the entire chain to be completed. This can result in huge chains being processed in IO completion context. We get deep bio chaining if we have large contiguous physical extents. We will keep adding pages to the current bio until it is full, then we'll chain a new bio to keep adding pages for writeback. Hence we can build bio chains that map millions of pages and tens of gigabytes of RAM if the page cache contains big enough contiguous dirty file regions. This long bio chain pins those pages until the final bio in the chain completes and the ioend can iterate all the chained bios and complete them. OTOH, if we have a physically fragmented file, we end up submitting one ioend per physical fragment, each with a small bio or bio chain attached to it. We do not chain these at IO submission time, but instead we chain them at completion time based on file offset via iomap_ioend_try_merge(). Hence we can end up with unbound ioend chains being built via completion merging. XFS can then do COW remapping or unwritten extent conversion on that merged chain, which involves walking an extent fragment at a time and running a transaction to modify the physical extent information. IOWs, we merge all the discontiguous ioends together into a contiguous file range, only to then process them individually as discontiguous extents. This extent manipulation is computationally expensive and can run in a tight loop, so merging logically contiguous but physically discontiguous ioends gains us nothing except for hiding the fact we broke the ioends up into individual physical extents at submission and then need to loop over those individual physical extents at completion. Hence we need to have mechanisms to limit ioend sizes and to break up completion processing of large merged ioend chains: 1. bio chains per ioend need to be bound in length. Pure overwrites go straight to iomap_finish_ioend() in softirq context with the exact bio chain attached to the ioend by submission. Hence the only way to prevent long holdoffs here is to bound ioend submission sizes because we can't reschedule in softirq context. 2. iomap_finish_ioends() has to handle unbound merged ioend chains correctly. This relies on any one call to iomap_finish_ioend() being bound in runtime so that cond_resched() can be issued regularly as the long ioend chain is processed. i.e. this relies on mechanism #1 to limit individual ioend sizes to work correctly. 3. filesystems have to loop over the merged ioends to process physical extent manipulations. This means they can loop internally, and so we break merging at physical extent boundaries so the filesystem can easily insert reschedule points between individual extent manipulations. Signed-off-by: Dave Chinner Reported-and-tested-by: Trond Myklebust Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J.
Wong --- fs/iomap/buffered-io.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++---- fs/xfs/xfs_aops.c | 16 +++++++++++++++- include/linux/iomap.h | 2 ++ 3 files changed, 65 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index c938bbad075e..6c51a75d0be6 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -21,6 +21,8 @@ #include "../internal.h" +#define IOEND_BATCH_SIZE 4096 + /* * Structure allocated for each folio when block size < folio size * to track sub-folio uptodate status and I/O completions. @@ -1039,7 +1041,7 @@ static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, * state, release holds on bios, and finally free up memory. Do not use the * ioend after this. */ -static void +static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error) { struct inode *inode = ioend->io_inode; @@ -1048,6 +1050,7 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error) u64 start = bio->bi_iter.bi_sector; loff_t offset = ioend->io_offset; bool quiet = bio_flagged(bio, BIO_QUIET); + u32 folio_count = 0; for (bio = &ioend->io_inline_bio; bio; bio = next) { struct folio_iter fi; @@ -1062,9 +1065,11 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error) next = bio->bi_private; /* walk all folios in bio, ending page IO on them */ - bio_for_each_folio_all(fi, bio) + bio_for_each_folio_all(fi, bio) { iomap_finish_folio_write(inode, fi.folio, fi.length, error); + folio_count++; + } bio_put(bio); } /* The ioend has been freed by bio_put() */ @@ -1074,20 +1079,36 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error) "%s: writeback error on inode %lu, offset %lld, sector %llu", inode->i_sb->s_id, inode->i_ino, offset, start); } + return folio_count; } +/* + * Ioend completion routine for merged bios. This can only be called from task + * contexts as merged ioends can be of unbound length. Hence we have to break up + * the writeback completions into manageable chunks to avoid long scheduler + * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get + * good batch processing throughput without creating adverse scheduler latency + * conditions. + */ void iomap_finish_ioends(struct iomap_ioend *ioend, int error) { struct list_head tmp; + u32 completions; + + might_sleep(); list_replace_init(&ioend->io_list, &tmp); - iomap_finish_ioend(ioend, error); + completions = iomap_finish_ioend(ioend, error); while (!list_empty(&tmp)) { + if (completions > IOEND_BATCH_SIZE * 8) { + cond_resched(); + completions = 0; + } ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); list_del_init(&ioend->io_list); - iomap_finish_ioend(ioend, error); + completions += iomap_finish_ioend(ioend, error); } } EXPORT_SYMBOL_GPL(iomap_finish_ioends); @@ -1108,6 +1129,18 @@ iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) return false; if (ioend->io_offset + ioend->io_size != next->io_offset) return false; + /* + * Do not merge physically discontiguous ioends. The filesystem + * completion functions will have to iterate the physical + * discontiguities even if we merge the ioends at a logical level, so + * we don't gain anything by merging physical discontiguities here. + * + * We cannot use bio->bi_iter.bi_sector here as it is modified during + * submission so does not point to the start sector of the bio at + * completion. 
+ */ + if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) + return false; return true; } @@ -1209,8 +1242,10 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, ioend->io_flags = wpc->iomap.flags; ioend->io_inode = inode; ioend->io_size = 0; + ioend->io_folios = 0; ioend->io_offset = offset; ioend->io_bio = bio; + ioend->io_sector = sector; return ioend; } @@ -1251,6 +1286,13 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, return false; if (sector != bio_end_sector(wpc->ioend->io_bio)) return false; + /* + * Limit ioend bio chain lengths to minimise IO completion latency. This + * also prevents long tight loops ending page writeback on all the + * folios in the ioend. + */ + if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE) + return false; return true; } @@ -1335,6 +1377,8 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, &submit_list); count++; } + if (count) + wpc->ioend->io_folios++; WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); WARN_ON_ONCE(!folio_test_locked(folio)); diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 2705f91bdd0d..9d6a67c7d227 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -136,7 +136,20 @@ done: memalloc_nofs_restore(nofs_flag); } -/* Finish all pending io completions. */ +/* + * Finish all pending IO completions that require transactional modifications. + * + * We try to merge physical and logically contiguous ioends before completion to + * minimise the number of transactions we need to perform during IO completion. + * Both unwritten extent conversion and COW remapping need to iterate and modify + * one physical extent at a time, so we gain nothing by merging physically + * discontiguous extents here. + * + * The ioend chain length that we can be processing here is largely unbound in + * length and we may have to perform significant amounts of work on each ioend + * to complete it. Hence we have to be careful about holding the CPU for too + * long in this loop. + */ void xfs_end_io( struct work_struct *work) @@ -157,6 +170,7 @@ xfs_end_io( list_del_init(&ioend->io_list); iomap_ioend_try_merge(ioend, &tmp); xfs_end_ioend(ioend); + cond_resched(); } } diff --git a/include/linux/iomap.h b/include/linux/iomap.h index b55bd49e55f5..97a3a2edb585 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -263,9 +263,11 @@ struct iomap_ioend { struct list_head io_list; /* next ioend in chain */ u16 io_type; u16 io_flags; /* IOMAP_F_* */ + u32 io_folios; /* folios added to ioend */ struct inode *io_inode; /* file being written to */ size_t io_size; /* size of the extent */ loff_t io_offset; /* offset in the file */ + sector_t io_sector; /* start sector of ioend */ struct bio *io_bio; /* bio being built */ struct bio io_inline_bio; /* MUST BE LAST! */ }; -- cgit v1.2.3-71-gd317 From 597568e8df046ebf349c706b281a711297ab20fb Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Tue, 25 Jan 2022 13:50:07 +0800 Subject: misc: rtsx: Rework runtime power management flow Commit 5b4258f6721f ("misc: rtsx: rts5249 support runtime PM") uses "rtd3_work" and "idle_work" to manage its own runtime PM state machine. When its child device, rtsx_pci_sdmmc, uses runtime PM refcount correctly, all the additional works can be managed by generic runtime PM helpers. So consolidate "idle_work" and "rtd3_work" into generic runtime idle callback and runtime suspend callback, respectively.
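The consolidated flow relies on the child driver bracketing hardware access with runtime PM references, as in the generic sketch below (not the literal rtsx_pci_sdmmc code); dropping the last reference invokes the new runtime idle callback, which in turn schedules the real suspend:

	pm_runtime_get_sync(dev);	/* resume the reader if it was suspended */
	/* ... issue the card command ... */
	pm_runtime_put(dev);		/* may invoke runtime_idle, which
					 * schedules the runtime suspend */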
Fixes: 5b4258f6721f ("misc: rtsx: rts5249 support runtime PM") Cc: Ricky WU Tested-by: Ricky WU Signed-off-by: Kai-Heng Feng Link: https://lore.kernel.org/r/20220125055010.1866563-2-kai.heng.feng@canonical.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rtsx_pcr.c | 118 ++++++++++++------------------------- include/linux/rtsx_pci.h | 3 - 2 files changed, 39 insertions(+), 82 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 6ac509c1821c..8aba47a7d973 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -152,20 +152,12 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr) if (pcr->remove_pci) return; - if (pcr->rtd3_en) - if (pcr->is_runtime_suspended) { - pm_runtime_get(&(pcr->pci->dev)); - pcr->is_runtime_suspended = false; - } - if (pcr->state != PDEV_STAT_RUN) { pcr->state = PDEV_STAT_RUN; if (pcr->ops->enable_auto_blink) pcr->ops->enable_auto_blink(pcr); rtsx_pm_full_on(pcr); } - - mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200)); } EXPORT_SYMBOL_GPL(rtsx_pci_start_run); @@ -1094,40 +1086,6 @@ static void rtsx_pm_power_saving(struct rtsx_pcr *pcr) rtsx_comm_pm_power_saving(pcr); } -static void rtsx_pci_rtd3_work(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work); - - pcr_dbg(pcr, "--> %s\n", __func__); - if (!pcr->is_runtime_suspended) - pm_runtime_put(&(pcr->pci->dev)); -} - -static void rtsx_pci_idle_work(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work); - - pcr_dbg(pcr, "--> %s\n", __func__); - - mutex_lock(&pcr->pcr_mutex); - - pcr->state = PDEV_STAT_IDLE; - - if (pcr->ops->disable_auto_blink) - pcr->ops->disable_auto_blink(pcr); - if (pcr->ops->turn_off_led) - pcr->ops->turn_off_led(pcr); - - rtsx_pm_power_saving(pcr); - - mutex_unlock(&pcr->pcr_mutex); - - if (pcr->rtd3_en) - mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000)); -} - static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) { /* Set relink_time to 0 */ @@ -1598,7 +1556,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, pcr->card_inserted = 0; pcr->card_removed = 0; INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect); - INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work); pcr->msi_en = msi_en; if (pcr->msi_en) { @@ -1623,20 +1580,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, rtsx_pcr_cells[i].pdata_size = sizeof(*handle); } - if (pcr->rtd3_en) { - INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work); - pm_runtime_allow(&pcidev->dev); - pm_runtime_enable(&pcidev->dev); - pcr->is_runtime_suspended = false; - } - ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells, ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL); if (ret < 0) goto free_slots; - schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); + pm_runtime_allow(&pcidev->dev); + pm_runtime_put(&pcidev->dev); return 0; @@ -1668,11 +1619,11 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) struct pcr_handle *handle = pci_get_drvdata(pcidev); struct rtsx_pcr *pcr = handle->pcr; - if (pcr->rtd3_en) - pm_runtime_get_noresume(&pcr->pci->dev); - pcr->remove_pci = true; + pm_runtime_get_sync(&pcidev->dev); + pm_runtime_forbid(&pcidev->dev); + /* Disable interrupts at the pcr level */ spin_lock_irq(&pcr->lock); rtsx_pci_writel(pcr, 
RTSX_BIER, 0); @@ -1680,9 +1631,6 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) spin_unlock_irq(&pcr->lock); cancel_delayed_work_sync(&pcr->carddet_work); - cancel_delayed_work_sync(&pcr->idle_work); - if (pcr->rtd3_en) - cancel_delayed_work_sync(&pcr->rtd3_work); mfd_remove_devices(&pcidev->dev); @@ -1700,11 +1648,6 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) idr_remove(&rtsx_pci_idr, pcr->id); spin_unlock(&rtsx_pci_lock); - if (pcr->rtd3_en) { - pm_runtime_disable(&pcr->pci->dev); - pm_runtime_put_noidle(&pcr->pci->dev); - } - kfree(pcr->slots); kfree(pcr); kfree(handle); @@ -1726,7 +1669,6 @@ static int __maybe_unused rtsx_pci_suspend(struct device *dev_d) pcr = handle->pcr; cancel_delayed_work(&pcr->carddet_work); - cancel_delayed_work(&pcr->idle_work); mutex_lock(&pcr->pcr_mutex); @@ -1760,8 +1702,6 @@ static int __maybe_unused rtsx_pci_resume(struct device *dev_d) if (ret) goto out; - schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); - out: mutex_unlock(&pcr->pcr_mutex); return ret; @@ -1786,6 +1726,33 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev) pci_disable_msi(pcr->pci); } +static int rtsx_pci_runtime_idle(struct device *device) +{ + struct pci_dev *pcidev = to_pci_dev(device); + struct pcr_handle *handle = pci_get_drvdata(pcidev); + struct rtsx_pcr *pcr = handle->pcr; + + dev_dbg(device, "--> %s\n", __func__); + + mutex_lock(&pcr->pcr_mutex); + + pcr->state = PDEV_STAT_IDLE; + + if (pcr->ops->disable_auto_blink) + pcr->ops->disable_auto_blink(pcr); + if (pcr->ops->turn_off_led) + pcr->ops->turn_off_led(pcr); + + rtsx_pm_power_saving(pcr); + + mutex_unlock(&pcr->pcr_mutex); + + if (pcr->rtd3_en) + pm_schedule_suspend(device, 10000); + + return -EBUSY; +} + static int rtsx_pci_runtime_suspend(struct device *device) { struct pci_dev *pcidev = to_pci_dev(device); @@ -1794,31 +1761,26 @@ static int rtsx_pci_runtime_suspend(struct device *device) handle = pci_get_drvdata(pcidev); pcr = handle->pcr; - dev_dbg(&(pcidev->dev), "--> %s\n", __func__); - cancel_delayed_work(&pcr->carddet_work); - cancel_delayed_work(&pcr->rtd3_work); - cancel_delayed_work(&pcr->idle_work); + dev_dbg(device, "--> %s\n", __func__); + + cancel_delayed_work_sync(&pcr->carddet_work); mutex_lock(&pcr->pcr_mutex); rtsx_pci_power_off(pcr, HOST_ENTER_S3); mutex_unlock(&pcr->pcr_mutex); - pcr->is_runtime_suspended = true; - return 0; } static int rtsx_pci_runtime_resume(struct device *device) { struct pci_dev *pcidev = to_pci_dev(device); - struct pcr_handle *handle; - struct rtsx_pcr *pcr; + struct pcr_handle *handle = pci_get_drvdata(pcidev); + struct rtsx_pcr *pcr = handle->pcr; - handle = pci_get_drvdata(pcidev); - pcr = handle->pcr; - dev_dbg(&(pcidev->dev), "--> %s\n", __func__); + dev_dbg(device, "--> %s\n", __func__); mutex_lock(&pcr->pcr_mutex); @@ -1834,8 +1796,6 @@ static int rtsx_pci_runtime_resume(struct device *device) pcr->slots[RTSX_SD_CARD].p_dev); } - schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); - mutex_unlock(&pcr->pcr_mutex); return 0; } @@ -1850,7 +1810,7 @@ static int rtsx_pci_runtime_resume(struct device *device) static const struct dev_pm_ops rtsx_pci_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume) - SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL) + SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle) }; static struct pci_driver rtsx_pci_driver = { diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 4ab7bfc675f1..89b7d34e25b6 
100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -1201,8 +1201,6 @@ struct rtsx_pcr { unsigned int card_exist; struct delayed_work carddet_work; - struct delayed_work idle_work; - struct delayed_work rtd3_work; spinlock_t lock; struct mutex pcr_mutex; @@ -1212,7 +1210,6 @@ struct rtsx_pcr { unsigned int cur_clock; bool remove_pci; bool msi_en; - bool is_runtime_suspended; #define EXTRA_CAPS_SD_SDR50 (1 << 0) #define EXTRA_CAPS_SD_SDR104 (1 << 1) -- cgit v1.2.3-71-gd317 From 71732e24609b5a7af96efc89aebde55f76c1de3e Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Tue, 25 Jan 2022 13:50:09 +0800 Subject: misc: rtsx: Quiesce rts5249 on system suspend Set more registers in force_power_down callback to avoid S3 wakeup from hotplugging cards. This was originally written by Ricky WU. Link: https://lore.kernel.org/lkml/c4525b4738f94483b9b8f8571fc80646@realtek.com/ Cc: Ricky WU Tested-by: Ricky WU Signed-off-by: Kai-Heng Feng Link: https://lore.kernel.org/r/20220125055010.1866563-4-kai.heng.feng@canonical.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rtl8411.c | 2 +- drivers/misc/cardreader/rts5209.c | 2 +- drivers/misc/cardreader/rts5228.c | 2 +- drivers/misc/cardreader/rts5229.c | 2 +- drivers/misc/cardreader/rts5249.c | 31 +++++++++++++++++++++++++++++-- drivers/misc/cardreader/rts5261.c | 2 +- drivers/misc/cardreader/rtsx_pcr.c | 14 +++++++------- drivers/misc/cardreader/rtsx_pcr.h | 1 + include/linux/rtsx_pci.h | 2 +- 9 files changed, 43 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c index 4c5621b17a6f..06457e875a90 100644 --- a/drivers/misc/cardreader/rtl8411.c +++ b/drivers/misc/cardreader/rtl8411.c @@ -76,7 +76,7 @@ static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr) map_sd_drive(rtl8411b_reg_to_sd30_drive_sel_3v3(reg)); } -static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07); } diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c index 29f5414072bf..52b0a476ba51 100644 --- a/drivers/misc/cardreader/rts5209.c +++ b/drivers/misc/cardreader/rts5209.c @@ -47,7 +47,7 @@ static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr) } } -static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07); } diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c index ffc128278613..ffe3afbf8bfe 100644 --- a/drivers/misc/cardreader/rts5228.c +++ b/drivers/misc/cardreader/rts5228.c @@ -91,7 +91,7 @@ static int rts5228_optimize_phy(struct rtsx_pcr *pcr) return rtsx_pci_write_phy_register(pcr, 0x07, 0x8F40); } -static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { /* Set relink_time to 0 */ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c index c748eaf1ec1f..b0edd8006d52 100644 --- a/drivers/misc/cardreader/rts5229.c +++ b/drivers/misc/cardreader/rts5229.c @@ -44,7 +44,7 @@ static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr) map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg)); }
-static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03); } diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index 53f3a1f45c4a..91d240dd68fa 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -74,7 +74,8 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); - pcr->rtd3_en = rtsx_reg_to_rtd3_uhsii(reg); + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) + pcr->rtd3_en = rtsx_reg_to_rtd3_uhsii(reg); if (rtsx_check_mmc_support(reg)) pcr->extra_caps |= EXTRA_CAPS_NO_MMC; @@ -143,6 +144,27 @@ static int rts5249_init_from_hw(struct rtsx_pcr *pcr) return 0; } +static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) +{ + /* Set relink_time to 0 */ + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, + RELINK_TIME_MASK, 0); + + rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, + D3_DELINK_MODE_EN, D3_DELINK_MODE_EN); + + if (!runtime) { + rtsx_pci_write_register(pcr, RTS524A_AUTOLOAD_CFG1, + CD_RESUME_EN_MASK, 0); + rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00); + rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20); + } + + rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN); +} + static void rts52xa_save_content_from_efuse(struct rtsx_pcr *pcr) { u8 cnt, sv; @@ -281,8 +303,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); - if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN); + rtsx_pci_write_register(pcr, RTS524A_AUTOLOAD_CFG1, + CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); + } if (pcr->rtd3_en) { if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { @@ -724,6 +749,7 @@ static const struct pcr_ops rts524a_pcr_ops = { .card_power_on = rtsx_base_card_power_on, .card_power_off = rtsx_base_card_power_off, .switch_output_voltage = rtsx_base_switch_output_voltage, + .force_power_down = rts52xa_force_power_down, .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0, }; @@ -841,6 +867,7 @@ static const struct pcr_ops rts525a_pcr_ops = { .card_power_on = rts525a_card_power_on, .card_power_off = rtsx_base_card_power_off, .switch_output_voltage = rts525a_switch_output_voltage, + .force_power_down = rts52xa_force_power_down, .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0, }; diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 1fd4e0e50730..64333347c14a 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -91,7 +91,7 @@ static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg); } -static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { /* Set relink_time to 0 */ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); diff --git a/drivers/misc/cardreader/rtsx_pcr.c 
b/drivers/misc/cardreader/rtsx_pcr.c index 3c97d3b50456..1cb6425e8369 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1086,7 +1086,7 @@ static void rtsx_pm_power_saving(struct rtsx_pcr *pcr) rtsx_comm_pm_power_saving(pcr); } -static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +static void rtsx_base_force_power_down(struct rtsx_pcr *pcr) { /* Set relink_time to 0 */ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); @@ -1100,7 +1100,7 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN); } -static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state) +static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { if (pcr->ops->turn_off_led) pcr->ops->turn_off_led(pcr); @@ -1112,9 +1112,9 @@ static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state) rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state); if (pcr->ops->force_power_down) - pcr->ops->force_power_down(pcr, pm_state); + pcr->ops->force_power_down(pcr, pm_state, runtime); else - rtsx_base_force_power_down(pcr, pm_state); + rtsx_base_force_power_down(pcr); } void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr) @@ -1669,7 +1669,7 @@ static int __maybe_unused rtsx_pci_suspend(struct device *dev_d) mutex_lock(&pcr->pcr_mutex); - rtsx_pci_power_off(pcr, HOST_ENTER_S3); + rtsx_pci_power_off(pcr, HOST_ENTER_S3, false); mutex_unlock(&pcr->pcr_mutex); return 0; @@ -1708,7 +1708,7 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev) dev_dbg(&(pcidev->dev), "--> %s\n", __func__); - rtsx_pci_power_off(pcr, HOST_ENTER_S1); + rtsx_pci_power_off(pcr, HOST_ENTER_S1, false); pci_disable_device(pcidev); free_irq(pcr->irq, (void *)pcr); @@ -1754,7 +1754,7 @@ static int rtsx_pci_runtime_suspend(struct device *device) cancel_delayed_work_sync(&pcr->carddet_work); mutex_lock(&pcr->pcr_mutex); - rtsx_pci_power_off(pcr, HOST_ENTER_S3); + rtsx_pci_power_off(pcr, HOST_ENTER_S3, true); mutex_unlock(&pcr->pcr_mutex); diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h index daf057c4eea6..aa0ebd667227 100644 --- a/drivers/misc/cardreader/rtsx_pcr.h +++ b/drivers/misc/cardreader/rtsx_pcr.h @@ -25,6 +25,7 @@ #define REG_EFUSE_POWEROFF 0x00 #define RTS5250_CLK_CFG3 0xFF79 #define RTS525A_CFG_MEM_PD 0xF0 +#define RTS524A_AUTOLOAD_CFG1 0xFF7C #define RTS524A_PM_CTRL3 0xFF7E #define RTS525A_BIOS_CFG 0xFF2D #define RTS525A_LOAD_BIOS_FLAG 0x01 diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 89b7d34e25b6..3d780b44e678 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -1095,7 +1095,7 @@ struct pcr_ops { unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr); int (*conv_clk_and_div_n)(int clk, int dir); void (*fetch_vendor_settings)(struct rtsx_pcr *pcr); - void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state); + void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state, bool runtime); void (*stop_cmd)(struct rtsx_pcr *pcr); void (*set_aspm)(struct rtsx_pcr *pcr, bool enable); -- cgit v1.2.3-71-gd317 From d7e4f8545b497b3f5687e592f1c355cbaee64c8c Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Wed, 26 Jan 2022 13:04:26 +0800 Subject: pid: Introduce helper task_is_in_init_pid_ns() Currently the kernel open-codes the check for whether a task is in the root PID namespace in multiple places, using the pattern: if (task_active_pid_ns(current) == &init_pid_ns) do_something(); This patch creates a new helper function, task_is_in_init_pid_ns(), which returns true if the passed task is in the root PID namespace and false otherwise. It will be used to replace the open-coded checks. Suggested-by: Suzuki K Poulose Signed-off-by: Leo Yan Reviewed-by: Leon Romanovsky Acked-by: Suzuki K Poulose Acked-by: Balbir Singh Signed-off-by: Jakub Kicinski --- include/linux/pid_namespace.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 7c7e627503d2..07481bb87d4e 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -86,4 +86,9 @@ extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); void pidhash_init(void); void pid_idr_init(void); +static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) +{ + return task_active_pid_ns(tsk) == &init_pid_ns; +} + #endif /* _LINUX_PID_NS_H */ -- cgit v1.2.3-71-gd317 From 8033c6c2fed235b3d571b5a5ede302b752bc5c7d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jan 2022 10:54:12 -0800 Subject: bpf: remove unused static inlines Remove two dead stubs: sk_msg_clear_meta() was never used, and the use of xskq_cons_is_full() was replaced by xsk_tx_writeable() in v5.10. Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20220126185412.2776254-1-kuba@kernel.org Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 10 ---------- include/linux/skmsg.h | 5 ----- net/xdp/xsk_queue.h | 7 ------- 3 files changed, 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 394305a5e02f..2344f793c4dc 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1875,11 +1875,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags) return -EOPNOTSUPP; } -static inline bool dev_map_can_have_prog(struct bpf_map *map) -{ - return false; -} - static inline void __dev_flush(void) { } @@ -1943,11 +1938,6 @@ static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, return -EOPNOTSUPP; } -static inline bool cpu_map_prog_allowed(struct bpf_map *map) -{ - return false; -} - static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) { diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 18a717fe62eb..ddde5f620901 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -171,11 +171,6 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end) #define sk_msg_iter_next(msg, which) \ sk_msg_iter_var_next(msg->sg.which) -static inline void sk_msg_clear_meta(struct sk_msg *msg) -{ - memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy)); -} - static inline void sk_msg_init(struct sk_msg *msg) { BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS); diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index e9aa2c236356..79be3e53ddf1 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -304,13 +304,6 @@ static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt) q->cached_cons += cnt; } -static inline bool xskq_cons_is_full(struct xsk_queue *q) -{ - /* No barriers needed since data is not accessed */ - return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) == - q->nentries; -} - static inline u32 xskq_cons_present_entries(struct xsk_queue *q) { /* No barriers needed since data is not accessed */ -- cgit v1.2.3-71-gd317 From 27599aacbaefcbf2af7b06b0029459bbf682000d Mon Sep 17 00:00:00 2001 From:
Thomas Zimmermann Date: Tue, 25 Jan 2022 10:12:18 +0100 Subject: fbdev: Hot-unplug firmware fb devices on forced removal Hot-unplug all firmware-framebuffer devices as part of removing them via remove_conflicting_framebuffers() et al. This releases all memory regions so that they can be acquired by native drivers. Firmware, such as EFI, installs a framebuffer while posting the computer. After removing the firmware-framebuffer device from fbdev, a native driver takes over the hardware and the firmware framebuffer becomes invalid. Firmware-framebuffer drivers, specifically simplefb, don't release their device from Linux' device hierarchy. The device still owns the firmware framebuffer and blocks the native drivers from loading. This has been observed in the vmwgfx driver. [1] Initiating a device removal (i.e., hot unplug) as part of remove_conflicting_framebuffers() removes the underlying device and returns the memory range to the system. [1] https://lore.kernel.org/dri-devel/20220117180359.18114-1-zack@kde.org/ v2: * rename variable 'dev' to 'device' (Javier) Signed-off-by: Thomas Zimmermann Reported-by: Zack Rusin Reviewed-by: Javier Martinez Canillas Reviewed-by: Zack Rusin Reviewed-by: Hans de Goede CC: stable@vger.kernel.org # v5.11+ Link: https://patchwork.freedesktop.org/patch/msgid/20220125091222.21457-2-tzimmermann@suse.de --- drivers/video/fbdev/core/fbmem.c | 29 ++++++++++++++++++++++++++--- include/linux/fb.h | 1 + 2 files changed, 27 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 826175ad88a2..45329dbfe617 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -1557,18 +1558,36 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a, /* check all firmware fbs and kick off if the base addr overlaps */ for_each_registered_fb(i) { struct apertures_struct *gen_aper; + struct device *device; if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE)) continue; gen_aper = registered_fb[i]->apertures; + device = registered_fb[i]->device; if (fb_do_apertures_overlap(gen_aper, a) || (primary && gen_aper && gen_aper->count && gen_aper->ranges[0].base == VGA_FB_PHYS)) { printk(KERN_INFO "fb%d: switching to %s from %s\n", i, name, registered_fb[i]->fix.id); - do_unregister_framebuffer(registered_fb[i]); + + /* + * If we kick-out a firmware driver, we also want to remove + * the underlying platform device, such as simple-framebuffer, + * VESA, EFI, etc. A native driver will then be able to + * allocate the memory range. + * + * If it's not a platform device, at least print a warning. A + * fix would add code to remove the device from the system.
+ */ + if (dev_is_platform(device)) { + registered_fb[i]->forced_out = true; + platform_device_unregister(to_platform_device(device)); + } else { + pr_warn("fb%d: cannot remove device\n", i); + do_unregister_framebuffer(registered_fb[i]); + } } } } @@ -1851,9 +1870,13 @@ EXPORT_SYMBOL(register_framebuffer); void unregister_framebuffer(struct fb_info *fb_info) { - mutex_lock(®istration_lock); + bool forced_out = fb_info->forced_out; + + if (!forced_out) + mutex_lock(®istration_lock); do_unregister_framebuffer(fb_info); - mutex_unlock(®istration_lock); + if (!forced_out) + mutex_unlock(®istration_lock); } EXPORT_SYMBOL(unregister_framebuffer); diff --git a/include/linux/fb.h b/include/linux/fb.h index 6f3db99ab990..ac294fc4c07b 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -502,6 +502,7 @@ struct fb_info { } *apertures; bool skip_vt_switch; /* no VT switch on suspend/resume required */ + bool forced_out; /* set when being removed by another driver */ }; static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { -- cgit v1.2.3-71-gd317 From ec2444530612a886b406e2830d7f314d1a07d4bb Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Wed, 19 Jan 2022 14:39:39 -0800 Subject: psi: Fix "no previous prototype" warnings when CONFIG_CGROUPS=n When CONFIG_CGROUPS is disabled psi code generates the following warnings: kernel/sched/psi.c:1112:21: warning: no previous prototype for 'psi_trigger_create' [-Wmissing-prototypes] 1112 | struct psi_trigger *psi_trigger_create(struct psi_group *group, | ^~~~~~~~~~~~~~~~~~ kernel/sched/psi.c:1182:6: warning: no previous prototype for 'psi_trigger_destroy' [-Wmissing-prototypes] 1182 | void psi_trigger_destroy(struct psi_trigger *t) | ^~~~~~~~~~~~~~~~~~~ kernel/sched/psi.c:1249:10: warning: no previous prototype for 'psi_trigger_poll' [-Wmissing-prototypes] 1249 | __poll_t psi_trigger_poll(void **trigger_ptr, | ^~~~~~~~~~~~~~~~ Change declarations of these functions in the header to provide the prototypes even when they are unused. 
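As a minimal sketch of the pattern being applied here (the frob_* names and CONFIG_FROB guard are hypothetical, not from this patch), keeping the declarations outside the config guard gives the out-of-line definitions a previous prototype, so -Wmissing-prototypes stays quiet even when the feature's callers are compiled out:

    /* frob.h - a minimal sketch, assuming a hypothetical CONFIG_FROB option */
    struct frob_trigger;
    struct cgroup;

    /* Declared unconditionally: the definitions in frob.c always have a
     * previous prototype, even when CONFIG_FROB is disabled. */
    struct frob_trigger *frob_trigger_create(char *buf, unsigned int nbytes);
    void frob_trigger_destroy(struct frob_trigger *t);

    #ifdef CONFIG_FROB
    /* Declarations used only by CONFIG_FROB=y callers can stay guarded. */
    int frob_cgroup_alloc(struct cgroup *cgrp);
    #endif
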
Fixes: 0e94682b73bf ("psi: introduce psi monitor") Reported-by: kernel test robot Signed-off-by: Suren Baghdasaryan Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20220119223940.787748-2-surenb@google.com --- include/linux/psi.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/psi.h b/include/linux/psi.h index a70ca833c6d7..827970278d62 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -25,18 +25,17 @@ void psi_memstall_enter(unsigned long *flags); void psi_memstall_leave(unsigned long *flags); int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res); - -#ifdef CONFIG_CGROUPS -int psi_cgroup_alloc(struct cgroup *cgrp); -void psi_cgroup_free(struct cgroup *cgrp); -void cgroup_move_task(struct task_struct *p, struct css_set *to); - struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf, size_t nbytes, enum psi_res res); void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t); __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file, poll_table *wait); + +#ifdef CONFIG_CGROUPS +int psi_cgroup_alloc(struct cgroup *cgrp); +void psi_cgroup_free(struct cgroup *cgrp); +void cgroup_move_task(struct task_struct *p, struct css_set *to); #endif #else /* CONFIG_PSI */ -- cgit v1.2.3-71-gd317 From bd5daba2d02479729526d35de85807aadf6fba20 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jan 2022 11:10:55 -0800 Subject: mii: remove mii_lpa_to_linkmode_lpa_sgmii() The only caller of mii_lpa_to_linkmode_lpa_sgmii() disappeared in v5.10. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/linux/mii.h | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mii.h b/include/linux/mii.h index 12ea29e04293..b8a1a17a87dd 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -387,23 +387,6 @@ mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa) speed_duplex == LPA_SGMII_10FULL); } -/** - * mii_lpa_to_linkmode_adv_sgmii - * @advertising: pointer to destination link mode. - * @lpa: value of the MII_LPA register - * - * A small helper function that translates MII_ADVERTISE bits - * to linkmode advertisement settings when in SGMII mode. - * Clears the old value of advertising. - */ -static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising, - u32 lpa) -{ - linkmode_zero(lp_advertising); - - mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa); -} - /** * mii_adv_mod_linkmode_adv_t * @advertising:pointer to destination link mode. -- cgit v1.2.3-71-gd317 From b1755400b4be33dbd286272b153579631be2e2ca Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jan 2022 11:10:57 -0800 Subject: net: remove net_invalid_timestamp() No callers since v3.15. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8131d0de7559..60bd2347708c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3899,11 +3899,6 @@ static inline ktime_t net_timedelta(ktime_t t) return ktime_sub(ktime_get_real(), t); } -static inline ktime_t net_invalid_timestamp(void) -{ - return 0; -} - static inline u8 skb_metadata_len(const struct sk_buff *skb) { return skb_shinfo(skb)->meta_len; -- cgit v1.2.3-71-gd317 From 08dfa5a19e1f4344ce5d3a5eed4c5529adafe0dc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jan 2022 11:10:58 -0800 Subject: net: remove linkmode_change_bit() No callers since v5.7; the initial use case seems pretty esoteric, so removing this should not harm the completeness of the API. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/linux/linkmode.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index f8397f300fcd..15e0e0209da4 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -66,11 +66,6 @@ static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr, linkmode_clear_bit(nr, addr); } -static inline void linkmode_change_bit(int nr, volatile unsigned long *addr) -{ - __change_bit(nr, addr); -} - static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr) { return test_bit(nr, addr); -- cgit v1.2.3-71-gd317 From 8b2d546e23bb3588f897089368b5f09e49f7762d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jan 2022 11:11:02 -0800 Subject: ipv6: remove inet6_rsk() and tcp_twsk_ipv6only() The stubs under !CONFIG_IPV6 were missed when the real functions got deleted ca. v3.13. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/linux/ipv6.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index a59d25f19385..1e0f8a31f3de 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -371,19 +371,12 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) return NULL; } -static inline struct inet6_request_sock * - inet6_rsk(const struct request_sock *rsk) -{ - return NULL; -} - static inline struct raw6_sock *raw6_sk(const struct sock *sk) { return NULL; } #define inet6_rcv_saddr(__sk) NULL -#define tcp_twsk_ipv6only(__sk) 0 #define inet_v6_ipv6only(__sk) 0 #endif /* IS_ENABLED(CONFIG_IPV6) */ #endif /* _IPV6_H */ -- cgit v1.2.3-71-gd317 From cc81df835c25b108248bad24021a21e77cbb84ac Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jan 2022 11:11:04 -0800 Subject: udp: remove inner_udp_hdr() Not used since it was added in v3.8. Signed-off-by: Jakub Kicinski Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/udp.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/udp.h b/include/linux/udp.h index ae66dadd8543..254a2654400f 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -23,11 +23,6 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb) return (struct udphdr *)skb_transport_header(skb); } -static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb) -{ - return (struct udphdr *)skb_inner_transport_header(skb); -} - #define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ?
128 : 256) static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask) -- cgit v1.2.3-71-gd317 From d59a67f2f3f39012271ed3d11c338706a011c5c2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jan 2022 11:11:06 -0800 Subject: netlink: remove nl_set_extack_cookie_u32() Not used since v5.10. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/linux/netlink.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 1ec631838af9..bda1c385cffb 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -135,15 +135,6 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, extack->cookie_len = sizeof(cookie); } -static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack, - u32 cookie) -{ - if (!extack) - return; - memcpy(extack->cookie, &cookie, sizeof(cookie)); - extack->cookie_len = sizeof(cookie); -} - void netlink_kernel_release(struct sock *sk); int __netlink_change_ngroups(struct sock *sk, unsigned int groups); int netlink_change_ngroups(struct sock *sk, unsigned int groups); -- cgit v1.2.3-71-gd317 From 364df53c081d93fcfd6b91085ff2650c7f17b3c7 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Thu, 27 Jan 2022 17:13:01 +0800 Subject: net: socket: rename SKB_DROP_REASON_SOCKET_FILTER Rename SKB_DROP_REASON_TCP_FILTER to SKB_DROP_REASON_SOCKET_FILTER, the reason used for skb drops out of the socket filter, before it becomes part of a released kernel. It will be used for more protocols than just TCP in future series. Signed-off-by: Menglong Dong Reviewed-by: David Ahern Link: https://lore.kernel.org/all/20220127091308.91401-2-imagedong@tencent.com/ Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 2 +- include/trace/events/skb.h | 2 +- net/ipv4/tcp_ipv4.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bf11e1fbd69b..8a636e678902 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -318,7 +318,7 @@ enum skb_drop_reason { SKB_DROP_REASON_NO_SOCKET, SKB_DROP_REASON_PKT_TOO_SMALL, SKB_DROP_REASON_TCP_CSUM, - SKB_DROP_REASON_TCP_FILTER, + SKB_DROP_REASON_SOCKET_FILTER, SKB_DROP_REASON_UDP_CSUM, SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 3e042ca2cedb..a8a64b97504d 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -14,7 +14,7 @@ EM(SKB_DROP_REASON_NO_SOCKET, NO_SOCKET) \ EM(SKB_DROP_REASON_PKT_TOO_SMALL, PKT_TOO_SMALL) \ EM(SKB_DROP_REASON_TCP_CSUM, TCP_CSUM) \ - EM(SKB_DROP_REASON_TCP_FILTER, TCP_FILTER) \ + EM(SKB_DROP_REASON_SOCKET_FILTER, SOCKET_FILTER) \ EM(SKB_DROP_REASON_UDP_CSUM, UDP_CSUM) \ EMe(SKB_DROP_REASON_MAX, MAX) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index b3f34e366b27..938b59636578 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2095,7 +2095,7 @@ process: nf_reset_ct(skb); if (tcp_filter(sk, skb)) { - drop_reason = SKB_DROP_REASON_TCP_FILTER; + drop_reason = SKB_DROP_REASON_SOCKET_FILTER; goto discard_and_relse; } th = (const struct tcphdr *)skb->data; -- cgit v1.2.3-71-gd317 From 46531a30364bd483bfa1b041c15d42a196e77e93 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 27 Jan 2022 14:09:13 +0000 Subject: cgroup/bpf: fast path skb BPF filtering Even though there is a static key protecting against the overhead of cgroup-bpf skb filtering when there is nothing attached, in many cases it's not enough, as registering a filter for one type will ruin the fast path for all others. This is observed not only in production servers I've looked at but also in laptops, where registration is done during init by systemd or something else. Add a per-socket fast path check guarding against such overhead. This affects both receive and transmit paths of TCP, UDP and other protocols. It showed a ~1% tx/s improvement in small-payload UDP send benchmarks using a real NIC in a server environment, and the number jumps to 2-3% for preemptible kernels. Reviewed-by: Stanislav Fomichev Signed-off-by: Pavel Begunkov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/d8c58857113185a764927a46f4b5a058d36d3ec3.1643292455.git.asml.silence@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf-cgroup.h | 24 ++++++++++++++++++++---- include/linux/bpf.h | 13 +++++++++++++ kernel/bpf/cgroup.c | 30 ------------------------------ kernel/bpf/core.c | 16 ++++------------ 4 files changed, 37 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index b525d8cdc25b..88a51b242adc 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -8,6 +8,7 @@ #include #include #include +#include #include struct sock; @@ -165,11 +166,23 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, void *value, u64 flags); +/* Opportunistic check to see whether we have any BPF program attached*/ +static inline bool cgroup_bpf_sock_enabled(struct sock *sk, + enum cgroup_bpf_attach_type type) +{ + struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); + struct bpf_prog_array *array; + + array = rcu_access_pointer(cgrp->bpf.effective[type]); + return array != &bpf_empty_prog_array.hdr; +} + /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled.
*/ #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \ + if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \ + cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ CGROUP_INET_INGRESS); \ \ @@ -181,7 +194,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, int __ret = 0; \ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ - if (sk_fullsock(__sk)) \ + if (sk_fullsock(__sk) && \ + cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ CGROUP_INET_EGRESS); \ } \ @@ -347,7 +361,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, kernel_optval) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \ + cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ optname, optval, \ optlen, \ @@ -367,7 +382,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, max_optlen, retval) \ ({ \ int __ret = retval; \ - if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \ + cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \ tcp_bpf_bypass_getsockopt, \ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 2344f793c4dc..e3b82ce51445 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1233,6 +1233,19 @@ struct bpf_prog_array { struct bpf_prog_array_item items[]; }; +struct bpf_empty_prog_array { + struct bpf_prog_array hdr; + struct bpf_prog *null_prog; +}; + +/* to avoid allocating empty bpf_prog_array for cgroups that + * don't have bpf program attached use one global 'bpf_empty_prog_array' + * It will not be modified the caller of bpf_prog_array_alloc() + * (since caller requested prog_cnt == 0) + * that pointer should be 'freed' by bpf_prog_array_free() + */ +extern struct bpf_empty_prog_array bpf_empty_prog_array; + struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); void bpf_prog_array_free(struct bpf_prog_array *progs); int bpf_prog_array_length(struct bpf_prog_array *progs); diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 279ebbed75a5..098632fdbc45 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1384,20 +1384,6 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, } #ifdef CONFIG_NET -static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp, - enum cgroup_bpf_attach_type attach_type) -{ - struct bpf_prog_array *prog_array; - bool empty; - - rcu_read_lock(); - prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]); - empty = bpf_prog_array_is_empty(prog_array); - rcu_read_unlock(); - - return empty; -} - static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen, struct bpf_sockopt_buf *buf) { @@ -1456,19 +1442,11 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, }; int ret, max_optlen; - /* Opportunistic check to see whether we have any BPF program - * attached to the hook so we don't waste time allocating - * memory and locking the socket. - */ - if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_SETSOCKOPT)) - return 0; - /* Allocate a bit more than the initial user buffer for * BPF program. 
The canonical use case is overriding * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic). */ max_optlen = max_t(int, 16, *optlen); - max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf); if (max_optlen < 0) return max_optlen; @@ -1550,15 +1528,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, }; int ret; - /* Opportunistic check to see whether we have any BPF program - * attached to the hook so we don't waste time allocating - * memory and locking the socket. - */ - if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_GETSOCKOPT)) - return retval; - ctx.optlen = max_optlen; - max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf); if (max_optlen < 0) return max_optlen; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 0a1cfd8544b9..04a8d5bea552 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1968,18 +1968,10 @@ static struct bpf_prog_dummy { }, }; -/* to avoid allocating empty bpf_prog_array for cgroups that - * don't have bpf program attached use one global 'empty_prog_array' - * It will not be modified the caller of bpf_prog_array_alloc() - * (since caller requested prog_cnt == 0) - * that pointer should be 'freed' by bpf_prog_array_free() - */ -static struct { - struct bpf_prog_array hdr; - struct bpf_prog *null_prog; -} empty_prog_array = { +struct bpf_empty_prog_array bpf_empty_prog_array = { .null_prog = NULL, }; +EXPORT_SYMBOL(bpf_empty_prog_array); struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) { @@ -1989,12 +1981,12 @@ struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) (prog_cnt + 1), flags); - return &empty_prog_array.hdr; + return &bpf_empty_prog_array.hdr; } void bpf_prog_array_free(struct bpf_prog_array *progs) { - if (!progs || progs == &empty_prog_array.hdr) + if (!progs || progs == &bpf_empty_prog_array.hdr) return; kfree_rcu(progs, rcu); } -- cgit v1.2.3-71-gd317 From 7472d5a642c94a0ee1882ff3038de72ffe803a01 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 27 Jan 2022 07:46:00 -0800 Subject: compiler_types: define __user as __attribute__((btf_type_tag("user"))) The __user attribute is currently mainly used by sparse for type checking. The attribute indicates whether a memory access is in user memory address space or not. Such information is important when tracing kernel internal functions or data structures, as accessing user memory often has different mechanisms compared to accessing kernel memory. For example, the perf-probe needs explicit command line specification to indicate a particular argument or string in user-space memory ([1], [2], [3]). Currently, vmlinux BTF is available in the kernel with many distributions. If __user attribute information is available in vmlinux BTF, the explicit user memory access information from users will not be necessary as the kernel can figure it out by itself with vmlinux BTF. Besides the above possible use for perf/probe, another use case is for the bpf verifier. Currently, for bpf BPF_PROG_TYPE_TRACING type of bpf programs, users can write direct code like p->m1->m2 and "p" could be a function parameter. Without __user information in BTF, the verifier will assume p->m1 is accessing kernel memory and will generate normal loads. Let us say "p" is actually tagged with __user in the source code. In such cases, p->m1 is actually accessing user memory, so a direct load is not right and may produce an incorrect result. For such cases, bpf_probe_read_user() will be the correct way to read p->m1.
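To make that last point concrete, here is a hedged sketch of what a tracing program has to do once p is known to carry the __user tag (the struct name, member name and attach point are hypothetical, following the p->m1 example above; they are not from the patch):

    /* sketch.bpf.c - minimal sketch, hypothetical type and attach point */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char LICENSE[] SEC("license") = "GPL";

    SEC("fentry/some_kernel_func")      /* assumed attach point */
    int BPF_PROG(read_user_member, struct foo *p)
    {
            int m1_val = 0;

            /* 'p' is tagged __user in the kernel source: a direct load
             * like 'm1_val = p->m1;' would be compiled as a kernel load
             * and may read garbage. Copy from user memory explicitly. */
            bpf_probe_read_user(&m1_val, sizeof(m1_val), &p->m1);
            return 0;
    }
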
To support encoding __user information in BTF, a new attribute __attribute__((btf_type_tag(""))) is implemented in clang ([4]). For example, if we have #define __user __attribute__((btf_type_tag("user"))) during kernel compilation, the attribute "user" information will be preserved in dwarf. After pahole converts dwarf to BTF, __user information will be available in vmlinux BTF. The following is an example with latest upstream clang (clang14) and pahole 1.23: [$ ~] cat test.c #define __user __attribute__((btf_type_tag("user"))) int foo(int __user *arg) { return *arg; } [$ ~] clang -O2 -g -c test.c [$ ~] pahole -JV test.o ... [1] INT int size=4 nr_bits=32 encoding=SIGNED [2] TYPE_TAG user type_id=1 [3] PTR (anon) type_id=2 [4] FUNC_PROTO (anon) return=1 args=(3 arg) [5] FUNC foo type_id=4 [$ ~] You can see that for the function argument "int __user *arg", its type is described as PTR -> TYPE_TAG(user) -> INT The kernel can use this information for bpf verification or other use cases. Currently, btf_type_tag is only supported in clang (>= clang14) and pahole (>= 1.23). gcc support is also proposed and under development ([5]). [1] http://lkml.kernel.org/r/155789874562.26965.10836126971405890891.stgit@devnote2 [2] http://lkml.kernel.org/r/155789872187.26965.4468456816590888687.stgit@devnote2 [3] http://lkml.kernel.org/r/155789871009.26965.14167558859557329331.stgit@devnote2 [4] https://reviews.llvm.org/D111199 [5] https://lore.kernel.org/bpf/0cbeb2fb-1a18-f690-e360-24b1c90c2a91@fb.com/ Signed-off-by: Yonghong Song Link: https://lore.kernel.org/r/20220127154600.652613-1-yhs@fb.com Signed-off-by: Alexei Starovoitov --- include/linux/compiler_types.h | 3 +++ lib/Kconfig.debug | 8 ++++++++ 2 files changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3c1795fdb568..3f31ff400432 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -31,6 +31,9 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __kernel # ifdef STRUCTLEAK_PLUGIN # define __user __attribute__((user)) +# elif defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ + __has_attribute(btf_type_tag) +# define __user __attribute__((btf_type_tag("user"))) # else # define __user # endif diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 14b89aa37c5c..6159859769fa 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -325,6 +325,14 @@ config DEBUG_INFO_BTF config PAHOLE_HAS_SPLIT_BTF def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "119") +config PAHOLE_HAS_BTF_TAG + def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "123") + depends on CC_IS_CLANG + help + Decide whether pahole emits btf_tag attributes (btf_type_tag and + btf_decl_tag) or not. Currently only clang compiler implements + these attributes, so make the config depend on CC_IS_CLANG. + config DEBUG_INFO_BTF_MODULES def_bool y depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF -- cgit v1.2.3-71-gd317 From c6f1bfe89ac95dc829dcb4ed54780da134ac5fce Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 27 Jan 2022 07:46:06 -0800 Subject: bpf: reject program if a __user tagged memory accessed in kernel way BPF verifier supports direct memory access for BPF_PROG_TYPE_TRACING type of bpf programs, e.g., a->b.
If "a" is a pointer pointing to kernel memory, the bpf verifier will allow users to write C code like a->b and will translate it to a kernel load properly. If "a" is a pointer to user memory, the bpf developer is expected to use the bpf_probe_read_user() helper to get the value a->b. Without utilizing the BTF __user tagging information, the current verifier will assume that a->b is a kernel memory access, and this may generate an incorrect result. Now that BTF contains __user information, the verifier can check whether the pointer points to user memory or not. If it does, the verifier can reject the program and force users to use the bpf_probe_read_user() helper explicitly. In the future, we can easily extend btf_add_space for other address space tagging, for example, rcu/percpu etc. Signed-off-by: Yonghong Song Link: https://lore.kernel.org/r/20220127154606.654961-1-yhs@fb.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 9 ++++++--- include/linux/btf.h | 5 +++++ kernel/bpf/btf.c | 34 ++++++++++++++++++++++++++++------ kernel/bpf/verifier.c | 35 ++++++++++++++++++++++++----------- net/bpf/bpf_dummy_struct_ops.c | 6 ++++-- net/ipv4/bpf_tcp_ca.c | 6 ++++-- 6 files changed, 71 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e3b82ce51445..6eb0b180d33b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -332,7 +332,10 @@ enum bpf_type_flag { */ MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS), - __BPF_TYPE_LAST_FLAG = MEM_ALLOC, + /* MEM is in user address space. */ + MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS), + + __BPF_TYPE_LAST_FLAG = MEM_USER, }; /* Max number of base types. */ @@ -588,7 +591,7 @@ struct bpf_verifier_ops { const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype, - u32 *next_btf_id); + u32 *next_btf_id, enum bpf_type_flag *flag); }; struct bpf_prog_offload_ops { @@ -1780,7 +1783,7 @@ static inline bool bpf_tracing_btf_ctx_access(int off, int size, int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype, - u32 *next_btf_id); + u32 *next_btf_id, enum bpf_type_flag *flag); bool btf_struct_ids_match(struct bpf_verifier_log *log, const struct btf *btf, u32 id, int off, const struct btf *need_btf, u32 need_type_id); diff --git a/include/linux/btf.h b/include/linux/btf.h index b12cfe3b12bb..f6c43dd513fa 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -238,6 +238,11 @@ static inline bool btf_type_is_var(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_VAR; } +static inline bool btf_type_is_type_tag(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG; +} + /* union is only a special case of struct: * all its offsetof(member) == 0 */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index b2a248956100..b983cee8d196 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4886,6 +4886,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, const char *tname = prog->aux->attach_func_name; struct bpf_verifier_log *log = info->log; const struct btf_param *args; + const char *tag_value; u32 nr_args, arg; int i, ret; @@ -5038,6 +5039,13 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, info->btf = btf; info->btf_id = t->type; t = btf_type_by_id(btf, t->type); + + if (btf_type_is_type_tag(t)) { + tag_value = __btf_name_by_offset(btf, t->name_off); + if (strcmp(tag_value, "user") == 0) + info->reg_type
|= MEM_USER; + } + /* skip modifiers */ while (btf_type_is_modifier(t)) { info->btf_id = t->type; @@ -5064,12 +5072,12 @@ enum bpf_struct_walk_result { static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, int off, int size, - u32 *next_btf_id) + u32 *next_btf_id, enum bpf_type_flag *flag) { u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; const struct btf_type *mtype, *elem_type = NULL; const struct btf_member *member; - const char *tname, *mname; + const char *tname, *mname, *tag_value; u32 vlen, elem_id, mid; again: @@ -5253,7 +5261,8 @@ error: } if (btf_type_is_ptr(mtype)) { - const struct btf_type *stype; + const struct btf_type *stype, *t; + enum bpf_type_flag tmp_flag = 0; u32 id; if (msize != size || off != moff) { @@ -5262,9 +5271,19 @@ error: mname, moff, tname, off, size); return -EACCES; } + + /* check __user tag */ + t = btf_type_by_id(btf, mtype->type); + if (btf_type_is_type_tag(t)) { + tag_value = __btf_name_by_offset(btf, t->name_off); + if (strcmp(tag_value, "user") == 0) + tmp_flag = MEM_USER; + } + stype = btf_type_skip_modifiers(btf, mtype->type, &id); if (btf_type_is_struct(stype)) { *next_btf_id = id; + *flag = tmp_flag; return WALK_PTR; } } @@ -5291,13 +5310,14 @@ error: int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype __maybe_unused, - u32 *next_btf_id) + u32 *next_btf_id, enum bpf_type_flag *flag) { + enum bpf_type_flag tmp_flag = 0; int err; u32 id; do { - err = btf_struct_walk(log, btf, t, off, size, &id); + err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag); switch (err) { case WALK_PTR: @@ -5305,6 +5325,7 @@ int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, * we're done. */ *next_btf_id = id; + *flag = tmp_flag; return PTR_TO_BTF_ID; case WALK_SCALAR: return SCALAR_VALUE; @@ -5349,6 +5370,7 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log, const struct btf *need_btf, u32 need_type_id) { const struct btf_type *type; + enum bpf_type_flag flag; int err; /* Are we already done? 
*/ @@ -5359,7 +5381,7 @@ again: type = btf_type_by_id(btf, id); if (!type) return false; - err = btf_struct_walk(log, btf, type, off, 1, &id); + err = btf_struct_walk(log, btf, type, off, 1, &id, &flag); if (err != WALK_STRUCT) return false; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index dcf065ec2774..1ae41d0cf96c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -536,7 +536,7 @@ static bool is_cmpxchg_insn(const struct bpf_insn *insn) static const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type) { - char postfix[16] = {0}, prefix[16] = {0}; + char postfix[16] = {0}, prefix[32] = {0}; static const char * const str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", @@ -570,9 +570,11 @@ static const char *reg_type_str(struct bpf_verifier_env *env, } if (type & MEM_RDONLY) - strncpy(prefix, "rdonly_", 16); + strncpy(prefix, "rdonly_", 32); if (type & MEM_ALLOC) - strncpy(prefix, "alloc_", 16); + strncpy(prefix, "alloc_", 32); + if (type & MEM_USER) + strncpy(prefix, "user_", 32); snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s", prefix, str[base_type(type)], postfix); @@ -1547,14 +1549,15 @@ static void mark_reg_not_init(struct bpf_verifier_env *env, static void mark_btf_ld_reg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno, enum bpf_reg_type reg_type, - struct btf *btf, u32 btf_id) + struct btf *btf, u32 btf_id, + enum bpf_type_flag flag) { if (reg_type == SCALAR_VALUE) { mark_reg_unknown(env, regs, regno); return; } mark_reg_known_zero(env, regs, regno); - regs[regno].type = PTR_TO_BTF_ID; + regs[regno].type = PTR_TO_BTF_ID | flag; regs[regno].btf = btf; regs[regno].btf_id = btf_id; } @@ -4152,6 +4155,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, struct bpf_reg_state *reg = regs + regno; const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); const char *tname = btf_name_by_offset(reg->btf, t->name_off); + enum bpf_type_flag flag = 0; u32 btf_id; int ret; @@ -4171,9 +4175,16 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, return -EACCES; } + if (reg->type & MEM_USER) { + verbose(env, + "R%d is ptr_%s access user memory: off=%d\n", + regno, tname, off); + return -EACCES; + } + if (env->ops->btf_struct_access) { ret = env->ops->btf_struct_access(&env->log, reg->btf, t, - off, size, atype, &btf_id); + off, size, atype, &btf_id, &flag); } else { if (atype != BPF_READ) { verbose(env, "only read is supported\n"); @@ -4181,14 +4192,14 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, } ret = btf_struct_access(&env->log, reg->btf, t, off, size, - atype, &btf_id); + atype, &btf_id, &flag); } if (ret < 0) return ret; if (atype == BPF_READ && value_regno >= 0) - mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id); + mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); return 0; } @@ -4201,6 +4212,7 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env, { struct bpf_reg_state *reg = regs + regno; struct bpf_map *map = reg->map_ptr; + enum bpf_type_flag flag = 0; const struct btf_type *t; const char *tname; u32 btf_id; @@ -4238,12 +4250,12 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env, return -EACCES; } - ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id); + ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag); if (ret < 0) return ret; if (value_regno >= 0) - mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id); + 
mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag); return 0; } @@ -4444,7 +4456,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn if (err < 0) return err; - err = check_ctx_access(env, insn_idx, off, size, t, ®_type, &btf, &btf_id); + err = check_ctx_access(env, insn_idx, off, size, t, ®_type, &btf, + &btf_id); if (err) verbose_linfo(env, insn_idx, "; "); if (!err && t == BPF_READ && value_regno >= 0) { diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index fbc896323bec..d0e54e30658a 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -145,7 +145,8 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype, - u32 *next_btf_id) + u32 *next_btf_id, + enum bpf_type_flag *flag) { const struct btf_type *state; s32 type_id; @@ -162,7 +163,8 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log, return -EACCES; } - err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id); + err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id, + flag); if (err < 0) return err; diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index b60c9fd7147e..f79ab942f03b 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -96,12 +96,14 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype, - u32 *next_btf_id) + u32 *next_btf_id, + enum bpf_type_flag *flag) { size_t end; if (atype == BPF_READ) - return btf_struct_access(log, btf, t, off, size, atype, next_btf_id); + return btf_struct_access(log, btf, t, off, size, atype, next_btf_id, + flag); if (t != tcp_sock_type) { bpf_log(log, "only read is supported\n"); -- cgit v1.2.3-71-gd317 From 9059b04b4108be71397941f4665d5aa79783125a Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 9 Jan 2022 21:46:34 +0200 Subject: net/mlx5: Remove unused TIR modify bitmask enums struct mlx5_ifc_modify_tir_bitmask_bits is used for the bitmask of MODIFY_TIR operations. Remove the unused bitmask enums. Signed-off-by: Tariq Toukan Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 598ac3bcc901..27145c4d6820 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -63,13 +63,6 @@ enum { MLX5_EVENT_TYPE_CODING_FPGA_QP_ERROR = 0x21 }; -enum { - MLX5_MODIFY_TIR_BITMASK_LRO = 0x0, - MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1, - MLX5_MODIFY_TIR_BITMASK_HASH = 0x2, - MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3 -}; - enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, -- cgit v1.2.3-71-gd317 From b5b3d10ef638fcadc56609961875bf157a92aaa4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 27 Jan 2022 08:33:49 -0800 Subject: net: mii: remove mii_lpa_mod_linkmode_lpa_sgmii() Vladimir points out that since we removed mii_lpa_to_linkmode_lpa_sgmii(), mii_lpa_mod_linkmode_lpa_sgmii() is also no longer called. Suggested-by: Vladimir Oltean Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- include/linux/mii.h | 33 --------------------------------- 1 file changed, 33 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mii.h b/include/linux/mii.h index b8a1a17a87dd..5ee13083cec7 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -354,39 +354,6 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) return result; } -/** - * mii_lpa_mod_linkmode_adv_sgmii - * @lp_advertising: pointer to destination link mode. - * @lpa: value of the MII_LPA register - * - * A small helper function that translates MII_LPA bits to - * linkmode advertisement settings for SGMII. - * Leaves other bits unchanged. - */ -static inline void -mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa) -{ - u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK; - - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising, - speed_duplex == LPA_SGMII_1000HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising, - speed_duplex == LPA_SGMII_1000FULL); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising, - speed_duplex == LPA_SGMII_100HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising, - speed_duplex == LPA_SGMII_100FULL); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising, - speed_duplex == LPA_SGMII_10HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising, - speed_duplex == LPA_SGMII_10FULL); -} - /** * mii_adv_mod_linkmode_adv_t * @advertising:pointer to destination link mode. -- cgit v1.2.3-71-gd317 From 9690ae60429020f38e4aa2540c306f27eb021bc0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 27 Jan 2022 10:42:59 -0800 Subject: ethtool: add header/data split indication For applications running on a mix of platforms it's useful to have a clear indication whether the host's NIC supports the geometry requirements of TCP zero-copy. TCP zero-copy Rx requires data to be neatly placed into memory pages. Most NICs can't do that. This patch is adding GET support only, since the NICs I work with either always have the feature enabled or enable it whenever MTU is set to jumbo. In other words, I don't need SET, but adding SET should be trivial. (The only note on SET is that we will likely want the setting to be "sticky" and use 0 / `unknown` to reset it back to driver default.) Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- Documentation/networking/ethtool-netlink.rst | 8 ++++++++ include/linux/ethtool.h | 2 ++ include/uapi/linux/ethtool_netlink.h | 7 +++++++ net/ethtool/rings.c | 15 ++++++++++----- 4 files changed, 27 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 9d98e0511249..cae28af7a476 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -860,8 +860,16 @@ Kernel response contents: ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring + ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split ==================================== ====== =========================== +``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with +page-flipping TCP zero-copy receive (``getsockopt(TCP_ZEROCOPY_RECEIVE)``). +If enabled the device is configured to place frame headers and data into separate buffers.
The device configuration must make it possible to receive full memory pages of data, for example because MTU is high enough or through HW-GRO. + RINGS_SET ========= diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 11efc45de66a..e0853f48b75e 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -70,9 +70,11 @@ enum { /** * struct kernel_ethtool_ringparam - RX/TX ring configuration * @rx_buf_len: Current length of buffers on the rx ring. + * @tcp_data_split: Scatter packet headers and data to separate buffers */ struct kernel_ethtool_ringparam { u32 rx_buf_len; + u8 tcp_data_split; }; /** diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index cca6e474a085..417d4280d7b5 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -318,6 +318,12 @@ enum { /* RINGS */ +enum { + ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, + ETHTOOL_TCP_DATA_SPLIT_DISABLED, + ETHTOOL_TCP_DATA_SPLIT_ENABLED, +}; + enum { ETHTOOL_A_RINGS_UNSPEC, ETHTOOL_A_RINGS_HEADER, /* nest - _A_HEADER_* */ @@ -330,6 +336,7 @@ enum { ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */ ETHTOOL_A_RINGS_TX, /* u32 */ ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */ + ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */ /* add new constants above here */ __ETHTOOL_A_RINGS_CNT, diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index c1d5f5e0fdc9..18a5035d3bee 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -53,7 +53,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base, nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */ nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */ nla_total_size(sizeof(u32)) + /* _RINGS_TX */ - nla_total_size(sizeof(u32)); /* _RINGS_RX_BUF_LEN */ + nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */ + nla_total_size(sizeof(u8)); /* _RINGS_TCP_DATA_SPLIT */ } static int rings_fill_reply(struct sk_buff *skb, @@ -61,9 +62,11 @@ static int rings_fill_reply(struct sk_buff *skb, const struct ethnl_reply_data *reply_base) { const struct rings_reply_data *data = RINGS_REPDATA(reply_base); - const struct kernel_ethtool_ringparam *kernel_ringparam = &data->kernel_ringparam; + const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam; const struct ethtool_ringparam *ringparam = &data->ringparam; + WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED); + if ((ringparam->rx_max_pending && (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX, ringparam->rx_max_pending) || @@ -84,9 +87,11 @@ static int rings_fill_reply(struct sk_buff *skb, ringparam->tx_max_pending) || nla_put_u32(skb, ETHTOOL_A_RINGS_TX, ringparam->tx_pending))) || - (kernel_ringparam->rx_buf_len && - (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, - kernel_ringparam->rx_buf_len)))) + (kr->rx_buf_len && + (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) || + (kr->tcp_data_split && + (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT, + kr->tcp_data_split)))) return -EMSGSIZE; return 0; -- cgit v1.2.3-71-gd317 From 6cdef8a6ee748ec522aabee049633b16b4115f73 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 27 Jan 2022 12:09:35 -0800 Subject: SUNRPC: add netns refcount tracker to struct svc_xprt struct svc_xprt holds a long-lived reference to a netns; it is worth tracking it. Signed-off-by: Eric Dumazet Acked-by: Chuck Lever Signed-off-by: David S.
Miller --- include/linux/sunrpc/svc_xprt.h | 1 + net/sunrpc/svc_xprt.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 571f605bc91e..382af90320ac 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -88,6 +88,7 @@ struct svc_xprt { struct list_head xpt_users; /* callbacks on free */ struct net *xpt_net; + netns_tracker ns_tracker; const struct cred *xpt_cred; struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */ diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index b21ad7994147..db878e833b67 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -162,7 +162,7 @@ static void svc_xprt_free(struct kref *kref) if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) svcauth_unix_info_release(xprt); put_cred(xprt->xpt_cred); - put_net(xprt->xpt_net); + put_net_track(xprt->xpt_net, &xprt->ns_tracker); /* See comment on corresponding get in xs_setup_bc_tcp(): */ if (xprt->xpt_bc_xprt) xprt_put(xprt->xpt_bc_xprt); @@ -198,7 +198,7 @@ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl, mutex_init(&xprt->xpt_mutex); spin_lock_init(&xprt->xpt_lock); set_bit(XPT_BUSY, &xprt->xpt_flags); - xprt->xpt_net = get_net(net); + xprt->xpt_net = get_net_track(net, &xprt->ns_tracker, GFP_ATOMIC); strcpy(xprt->xpt_remotebuf, "uninitialized"); } EXPORT_SYMBOL_GPL(svc_xprt_init); -- cgit v1.2.3-71-gd317 From b9a0d6d143ec63132beff04802578c499083f87c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 27 Jan 2022 12:09:37 -0800 Subject: SUNRPC: add netns refcount tracker to struct rpc_xprt Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/sunrpc/xprt.h | 1 + net/sunrpc/xprt.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 955ea4d7af0b..3cdc8d878d81 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -284,6 +284,7 @@ struct rpc_xprt { } stat; struct net *xprt_net; + netns_tracker ns_tracker; const char *servername; const char *address_strings[RPC_DISPLAY_MAX]; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index a02de2bddb28..5af484d6ba5e 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1835,7 +1835,7 @@ EXPORT_SYMBOL_GPL(xprt_alloc); void xprt_free(struct rpc_xprt *xprt) { - put_net(xprt->xprt_net); + put_net_track(xprt->xprt_net, &xprt->ns_tracker); xprt_free_all_slots(xprt); xprt_free_id(xprt); rpc_sysfs_xprt_destroy(xprt); @@ -2027,7 +2027,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net) xprt_init_xid(xprt); - xprt->xprt_net = get_net(net); + xprt->xprt_net = get_net_track(net, &xprt->ns_tracker, GFP_KERNEL); } /** -- cgit v1.2.3-71-gd317 From 7f5056b9e7b71149bf11073f00a57fa1ac2921a9 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 26 Jan 2022 15:35:14 -0500 Subject: security, lsm: dentry_init_security() Handle multi LSM registration A ceph user has reported that ceph is crashing with kernel NULL pointer dereference. Following is the backtrace. 
/proc/version: Linux version 5.16.2-arch1-1 (linux@archlinux) (gcc (GCC) 11.1.0, GNU ld (GNU Binutils) 2.36.1) #1 SMP PREEMPT Thu, 20 Jan 2022 16:18:29 +0000 distro / arch: Arch Linux / x86_64 SELinux is not enabled ceph cluster version: 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) relevant dmesg output: [ 30.947129] BUG: kernel NULL pointer dereference, address: 0000000000000000 [ 30.947206] #PF: supervisor read access in kernel mode [ 30.947258] #PF: error_code(0x0000) - not-present page [ 30.947310] PGD 0 P4D 0 [ 30.947342] Oops: 0000 [#1] PREEMPT SMP PTI [ 30.947388] CPU: 5 PID: 778 Comm: touch Not tainted 5.16.2-arch1-1 #1 86fbf2c313cc37a553d65deb81d98e9dcc2a3659 [ 30.947486] Hardware name: Gigabyte Technology Co., Ltd. B365M DS3H/B365M DS3H, BIOS F5 08/13/2019 [ 30.947569] RIP: 0010:strlen+0x0/0x20 [ 30.947616] Code: b6 07 38 d0 74 16 48 83 c7 01 84 c0 74 05 48 39 f7 75 ec 31 c0 31 d2 89 d6 89 d7 c3 48 89 f8 31 d2 89 d6 89 d7 c3 0f 1f 40 00 <80> 3f 00 74 12 48 89 f8 48 83 c0 01 80 38 00 75 f7 48 29 f8 31 ff [ 30.947782] RSP: 0018:ffffa4ed80ffbbb8 EFLAGS: 00010246 [ 30.947836] RAX: 0000000000000000 RBX: ffffa4ed80ffbc60 RCX: 0000000000000000 [ 30.947904] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 [ 30.947971] RBP: ffff94b0d15c0ae0 R08: 0000000000000000 R09: 0000000000000000 [ 30.948040] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 [ 30.948106] R13: 0000000000000001 R14: ffffa4ed80ffbc60 R15: 0000000000000000 [ 30.948174] FS: 00007fc7520f0740(0000) GS:ffff94b7ced40000(0000) knlGS:0000000000000000 [ 30.948252] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 30.948308] CR2: 0000000000000000 CR3: 0000000104a40001 CR4: 00000000003706e0 [ 30.948376] Call Trace: [ 30.948404] <TASK> [ 30.948431] ceph_security_init_secctx+0x7b/0x240 [ceph 49f9c4b9bf5be8760f19f1747e26da33920bce4b] [ 30.948582] ceph_atomic_open+0x51e/0x8a0 [ceph 49f9c4b9bf5be8760f19f1747e26da33920bce4b] [ 30.948708] ? get_cached_acl+0x4d/0xa0 [ 30.948759] path_openat+0x60d/0x1030 [ 30.948809] do_filp_open+0xa5/0x150 [ 30.948859] do_sys_openat2+0xc4/0x190 [ 30.948904] __x64_sys_openat+0x53/0xa0 [ 30.948948] do_syscall_64+0x5c/0x90 [ 30.948989] ? exc_page_fault+0x72/0x180 [ 30.949034] entry_SYSCALL_64_after_hwframe+0x44/0xae [ 30.949091] RIP: 0033:0x7fc7521e25bb [ 30.950849] Code: 25 00 00 41 00 3d 00 00 41 00 74 4b 64 8b 04 25 18 00 00 00 85 c0 75 67 44 89 e2 48 89 ee bf 9c ff ff ff b8 01 01 00 00 0f 05 <48> 3d 00 f0 ff ff 0f 87 91 00 00 00 48 8b 54 24 28 64 48 2b 14 25 The core of the problem is that ceph checks the return code from security_dentry_init_security() and, if the return code is 0, assumes everything is fine and continues to call strlen(name), which crashes. Typically the SELinux LSM returns 0 and sets name to "security.selinux", and that is not a problem. Or, if SELinux is not compiled in or is disabled, it returns -EOPNOTSUPP and ceph deals with it. But somehow in this configuration, 0 is being returned and "name" is not being initialized, and that's creating the problem. Our suspicion is that the BPF LSM is registering a hook for dentry_init_security() and returns the hook default of 0. LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry,...) I have not been able to reproduce it just by setting CONFIG_BPF_LSM=y. Stephen has tested the patch though and confirms it solves the problem for him. dentry_init_security() is written in such a way that it expects only one LSM to register the hook. At least that's the expectation with the current code.
If another LSM registers this hook and returns its default value, the call will simply return 0 as things stand, and that will break ceph. Hence, the suggestion is to change the semantics of this hook a bit. If there are no LSMs, or no LSM is taking ownership and initializing the security context, then return -EOPNOTSUPP. Also allow at most one LSM to initialize the security context. This hook can't deal with multiple LSMs trying to init the security context. This patch implements this new behavior. Reported-by: Stephen Muth Tested-by: Stephen Muth Suggested-by: Casey Schaufler Acked-by: Casey Schaufler Reviewed-by: Serge Hallyn Cc: Jeff Layton Cc: Christian Brauner Cc: Paul Moore Cc: # 5.16.0 Signed-off-by: Vivek Goyal Reviewed-by: Jeff Layton Acked-by: Paul Moore Acked-by: Christian Brauner Signed-off-by: James Morris --- include/linux/lsm_hook_defs.h | 2 +- security/security.c | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index df8de62f4710..f0c7b352340a 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -82,7 +82,7 @@ LSM_HOOK(int, 0, sb_add_mnt_opt, const char *option, const char *val, int len, void **mnt_opts) LSM_HOOK(int, 0, move_mount, const struct path *from_path, const struct path *to_path) -LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry, +LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry, int mode, const struct qstr *name, const char **xattr_name, void **ctx, u32 *ctxlen) LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode, diff --git a/security/security.c b/security/security.c index c88167a414b4..64abdfb20bc2 100644 --- a/security/security.c +++ b/security/security.c @@ -1056,8 +1056,19 @@ int security_dentry_init_security(struct dentry *dentry, int mode, const char **xattr_name, void **ctx, u32 *ctxlen) { - return call_int_hook(dentry_init_security, -EOPNOTSUPP, dentry, mode, - name, xattr_name, ctx, ctxlen); + struct security_hook_list *hp; + int rc; + + /* + * Only one module will provide a security context. + */ + hlist_for_each_entry(hp, &security_hook_heads.dentry_init_security, list) { + rc = hp->hook.dentry_init_security(dentry, mode, name, + xattr_name, ctx, ctxlen); + if (rc != LSM_RET_DEFAULT(dentry_init_security)) + return rc; + } + return LSM_RET_DEFAULT(dentry_init_security); } EXPORT_SYMBOL(security_dentry_init_security); -- cgit v1.2.3-71-gd317 From e45c47d1f94e0cc7b6b079fdb4bcce2995e2adc4 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 28 Jan 2022 10:58:39 -0500 Subject: block: add bio_start_io_acct_time() to control start_time The bio_start_io_acct_time() interface is like bio_start_io_acct(), but allows a start_time to be passed in. This gives drivers the ability to defer starting accounting until after IO is issued (but possibly not entirely, due to bio splitting).
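Illustration (not part of the patch): a minimal sketch of how a hypothetical bio-based driver might use the new interface, capturing jiffies when a bio is accepted and deferring the accounting start until the bio is actually issued. example_submit() and example_endio() are invented names; the real users are bio-based drivers such as dm.

/* assumes <linux/blkdev.h> and <linux/bio.h> */
static void example_submit(struct bio *bio)
{
	unsigned long start_time = jiffies;	/* captured at entry */

	/* ... the driver may clone or split the bio here ... */

	/* start accounting only once the I/O is actually issued */
	bio_start_io_acct_time(bio, start_time);
	submit_bio_noacct(bio);
}

static void example_endio(struct bio *bio, unsigned long start_time)
{
	/* pass the same start_time back at completion */
	bio_end_io_acct(bio, start_time);
}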
Reviewed-by: Christoph Hellwig Signed-off-by: Mike Snitzer Link: https://lore.kernel.org/r/20220128155841.39644-2-snitzer@redhat.com Signed-off-by: Jens Axboe --- block/blk-core.c | 25 +++++++++++++++++++------ include/linux/blkdev.h | 1 + 2 files changed, 20 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index 97f8bc8d3a79..d93e3bb9a769 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1061,20 +1061,32 @@ again: } static unsigned long __part_start_io_acct(struct block_device *part, - unsigned int sectors, unsigned int op) + unsigned int sectors, unsigned int op, + unsigned long start_time) { const int sgrp = op_stat_group(op); - unsigned long now = READ_ONCE(jiffies); part_stat_lock(); - update_io_ticks(part, now, false); + update_io_ticks(part, start_time, false); part_stat_inc(part, ios[sgrp]); part_stat_add(part, sectors[sgrp], sectors); part_stat_local_inc(part, in_flight[op_is_write(op)]); part_stat_unlock(); - return now; + return start_time; +} + +/** + * bio_start_io_acct_time - start I/O accounting for bio based drivers + * @bio: bio to start accounting for + * @start_time: start time that should be passed back to bio_end_io_acct(). + */ +void bio_start_io_acct_time(struct bio *bio, unsigned long start_time) +{ + __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), + bio_op(bio), start_time); } +EXPORT_SYMBOL_GPL(bio_start_io_acct_time); /** * bio_start_io_acct - start I/O accounting for bio based drivers @@ -1084,14 +1096,15 @@ static unsigned long __part_start_io_acct(struct block_device *part, */ unsigned long bio_start_io_acct(struct bio *bio) { - return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio)); + return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), + bio_op(bio), jiffies); } EXPORT_SYMBOL_GPL(bio_start_io_acct); unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, unsigned int op) { - return __part_start_io_acct(disk->part0, sectors, op); + return __part_start_io_acct(disk->part0, sectors, op, jiffies); } EXPORT_SYMBOL(disk_start_io_acct); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9c95df26fc26..f35aea98bc35 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1258,6 +1258,7 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, void disk_end_io_acct(struct gendisk *disk, unsigned int op, unsigned long start_time); +void bio_start_io_acct_time(struct bio *bio, unsigned long start_time); unsigned long bio_start_io_acct(struct bio *bio); void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, struct block_device *orig_bdev); -- cgit v1.2.3-71-gd317 From ca0acb511c21738b32386ce0f85c284b351d919e Mon Sep 17 00:00:00 2001 From: Akhil R Date: Fri, 28 Jan 2022 17:14:25 +0530 Subject: device property: Add fwnode_irq_get_byname Add fwnode_irq_get_byname() to get an interrupt by name from either an ACPI table or a Device Tree, whichever is used for enumeration. In the ACPI case, this allows us to use 'interrupt-names' in _DSD, which can be mapped to an Interrupt() resource by index. The implementation is similar to 'interrupt-names' in the Device Tree. Signed-off-by: Akhil R Reviewed-by: Andy Shevchenko Acked-by: Rafael J.
Wysocki Signed-off-by: Wolfram Sang --- drivers/base/property.c | 29 +++++++++++++++++++++++++++++ include/linux/property.h | 1 + 2 files changed, 30 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/property.c b/drivers/base/property.c index e6497f6877ee..fc59e0f7f9cc 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -935,6 +935,35 @@ void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index) } EXPORT_SYMBOL(fwnode_iomap); +/** + * fwnode_irq_get_byname - Get IRQ from a fwnode using its name + * @fwnode: Pointer to the firmware node + * @name: IRQ name + * + * Description: + * Find a match to the string @name in the 'interrupt-names' string array + * in _DSD for ACPI, or of_node for Device Tree. Then get the Linux IRQ + * number of the IRQ resource corresponding to the index of the matched + * string. + * + * Return: + * Linux IRQ number on success, or negative errno otherwise. + */ +int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name) +{ + int index; + + if (!name) + return -EINVAL; + + index = fwnode_property_match_string(fwnode, "interrupt-names", name); + if (index < 0) + return index; + + return fwnode_irq_get(fwnode, index); +} +EXPORT_SYMBOL(fwnode_irq_get_byname); + /** * fwnode_graph_get_next_endpoint - Get next endpoint firmware node * @fwnode: Pointer to the parent firmware node diff --git a/include/linux/property.h b/include/linux/property.h index 7399a0b45f98..95d56a562b6a 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -121,6 +121,7 @@ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); void fwnode_handle_put(struct fwnode_handle *fwnode); int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); +int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name); void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index); -- cgit v1.2.3-71-gd317 From a263a84088f689bf0c1552a510b25d0bcc45fcae Mon Sep 17 00:00:00 2001 From: Akhil R Date: Fri, 28 Jan 2022 17:14:27 +0530 Subject: i2c: smbus: Use device_*() functions instead of of_*() Change the of_*() functions to device_*() for firmware-agnostic usage. This allows the smbus_alert interrupt to be used without any changes in the controller drivers when an ACPI table is used.
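Illustration (not from these patches): a hypothetical probe path using the new helper, which works the same whether 'interrupt-names' comes from a Device Tree node or an ACPI _DSD. The "alert" IRQ name and example_probe() are assumptions.

/* assumes <linux/property.h> and <linux/interrupt.h> */
static int example_probe(struct device *dev)
{
	int irq;

	/* resolve "alert" against 'interrupt-names' in DT or ACPI _DSD */
	irq = fwnode_irq_get_byname(dev_fwnode(dev), "alert");
	if (irq < 0)
		return irq;

	/* devm_request_irq(dev, irq, handler, ...) would follow here */
	return 0;
}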
Signed-off-by: Akhil R Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 2 +- drivers/i2c/i2c-core-smbus.c | 11 ++++++----- drivers/i2c/i2c-smbus.c | 5 +++-- include/linux/i2c-smbus.h | 6 +++--- 4 files changed, 13 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 2c59dd748a49..32a45260c1f3 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -1479,7 +1479,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap) goto out_list; } - res = of_i2c_setup_smbus_alert(adap); + res = i2c_setup_smbus_alert(adap); if (res) goto out_reg; diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index e5b2d1465e7e..304c2c8fee68 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "i2c-core.h" @@ -701,13 +702,13 @@ struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter, } EXPORT_SYMBOL_GPL(i2c_new_smbus_alert_device); -#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF) -int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter) +#if IS_ENABLED(CONFIG_I2C_SMBUS) +int i2c_setup_smbus_alert(struct i2c_adapter *adapter) { int irq; - irq = of_property_match_string(adapter->dev.of_node, "interrupt-names", - "smbus_alert"); + irq = device_property_match_string(adapter->dev.parent, "interrupt-names", + "smbus_alert"); if (irq == -EINVAL || irq == -ENODATA) return 0; else if (irq < 0) @@ -715,5 +716,5 @@ int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter) return PTR_ERR_OR_ZERO(i2c_new_smbus_alert_device(adapter, NULL)); } -EXPORT_SYMBOL_GPL(of_i2c_setup_smbus_alert); +EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert); #endif diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index d3d06e3b4f3b..775332945ad0 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include @@ -128,7 +128,8 @@ static int smbalert_probe(struct i2c_client *ara, if (setup) { irq = setup->irq; } else { - irq = of_irq_get_byname(adapter->dev.of_node, "smbus_alert"); + irq = fwnode_irq_get_byname(dev_fwnode(adapter->dev.parent), + "smbus_alert"); if (irq <= 0) return irq; } diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 1ef421818d3a..95cf902e0bda 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -30,10 +30,10 @@ struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter, struct i2c_smbus_alert_setup *setup); int i2c_handle_smbus_alert(struct i2c_client *ara); -#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF) -int of_i2c_setup_smbus_alert(struct i2c_adapter *adap); +#if IS_ENABLED(CONFIG_I2C_SMBUS) +int i2c_setup_smbus_alert(struct i2c_adapter *adap); #else -static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap) +static inline int i2c_setup_smbus_alert(struct i2c_adapter *adap) { return 0; } -- cgit v1.2.3-71-gd317 From 6cb917411e028dcb66ce8f5db1b47361b78d7d3f Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 29 Jan 2022 13:40:52 -0800 Subject: include/linux/sysctl.h: fix register_sysctl_mount_point() return type The CONFIG_SYSCTL=n stub returns the wrong type. 
Fixes: ee9efac48a082 ("sysctl: add helper to register a sysctl mount point") Reported-by: kernel test robot Acked-by: Luis Chamberlain Cc: Tong Zhang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sysctl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 180adf7da785..6353d6db69b2 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -265,7 +265,7 @@ static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * return NULL; } -static inline struct sysctl_header *register_sysctl_mount_point(const char *path) +static inline struct ctl_table_header *register_sysctl_mount_point(const char *path) { return NULL; } -- cgit v1.2.3-71-gd317 From 536f4217ced62b671bd759f6b549621a5654a70f Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Sat, 29 Jan 2022 13:41:04 -0800 Subject: mm: page->mapping folio->mapping should have the same offset As with the other members of folio, the offset of page->mapping and folio->mapping must be the same. The compile-time check was inadvertently removed during development. Add it back. [willy@infradead.org: changelog redo] Link: https://lkml.kernel.org/r/20220104011734.21714-1-richard.weiyang@gmail.com Signed-off-by: Wei Yang Reviewed-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 9db36dc5d4cf..5140e5feb486 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -261,6 +261,7 @@ static_assert(sizeof(struct page) == sizeof(struct folio)); static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl)) FOLIO_MATCH(flags, flags); FOLIO_MATCH(lru, lru); +FOLIO_MATCH(mapping, mapping); FOLIO_MATCH(compound_head, lru); FOLIO_MATCH(index, index); FOLIO_MATCH(private, private); -- cgit v1.2.3-71-gd317 From 27fe73394a1c6d0b07fa4d95f1bca116d1cc66e9 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Sat, 29 Jan 2022 13:41:14 -0800 Subject: mm, kasan: use compare-exchange operation to set KASAN page tag It has been reported that the tag setting operation on newly-allocated pages can cause the page flags to be corrupted when performed concurrently with other flag updates as a result of the use of non-atomic operations. Fix the problem by using a compare-exchange loop to update the tag. 
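Illustration (generic sketch, not the mm code itself): the lockless read-modify-write pattern the fix relies on. A plain "flags &= ...; flags |= ..." sequence can lose a concurrent writer's update; try_cmpxchg() retries until the word was not modified between the read and the write.

static void set_bits_atomically(unsigned long *word, unsigned long mask,
				unsigned long val)
{
	unsigned long old, new;

	old = READ_ONCE(*word);
	do {
		new = (old & ~mask) | (val & mask);
		/* on failure try_cmpxchg() reloads 'old', so just retry */
	} while (!try_cmpxchg(word, &old, new));
}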
Link: https://lkml.kernel.org/r/20220120020148.1632253-1-pcc@google.com Link: https://linux-review.googlesource.com/id/I456b24a2b9067d93968d43b4bb3351c0cec63101 Fixes: 2813b9c02962 ("kasan, mm, arm64: tag non slab memory allocated via pagealloc") Signed-off-by: Peter Collingbourne Reviewed-by: Andrey Konovalov Cc: Peter Zijlstra Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index e1a84b1e6787..213cc569b192 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1506,11 +1506,18 @@ static inline u8 page_kasan_tag(const struct page *page) static inline void page_kasan_tag_set(struct page *page, u8 tag) { - if (kasan_enabled()) { - tag ^= 0xff; - page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); - page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; - } + unsigned long old_flags, flags; + + if (!kasan_enabled()) + return; + + tag ^= 0xff; + old_flags = READ_ONCE(page->flags); + do { + flags = old_flags; + flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); + flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; + } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); } static inline void page_kasan_tag_reset(struct page *page) -- cgit v1.2.3-71-gd317 From 51e50fbd3efc6064c30ed73a5e009018b36e290a Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 29 Jan 2022 13:41:17 -0800 Subject: psi: fix "no previous prototype" warnings when CONFIG_CGROUPS=n When CONFIG_CGROUPS is disabled psi code generates the following warnings: kernel/sched/psi.c:1112:21: warning: no previous prototype for 'psi_trigger_create' [-Wmissing-prototypes] 1112 | struct psi_trigger *psi_trigger_create(struct psi_group *group, | ^~~~~~~~~~~~~~~~~~ kernel/sched/psi.c:1182:6: warning: no previous prototype for 'psi_trigger_destroy' [-Wmissing-prototypes] 1182 | void psi_trigger_destroy(struct psi_trigger *t) | ^~~~~~~~~~~~~~~~~~~ kernel/sched/psi.c:1249:10: warning: no previous prototype for 'psi_trigger_poll' [-Wmissing-prototypes] 1249 | __poll_t psi_trigger_poll(void **trigger_ptr, | ^~~~~~~~~~~~~~~~ Change the declarations of these functions in the header to provide the prototypes even when they are unused. 
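Illustration (generic sketch with placeholder config names, not the psi header itself): the header pattern behind the fix. Prototypes must stay visible whenever the functions themselves are compiled; only declarations that are truly conditional belong behind the inner #ifdef.

#ifdef CONFIG_FEATURE
/* defined whenever CONFIG_FEATURE=y, so declared unconditionally here */
void feature_op(void);

#ifdef CONFIG_SUBFEATURE
/* only built when CONFIG_SUBFEATURE=y */
void feature_sub_op(void);
#endif

#else /* CONFIG_FEATURE */
static inline void feature_op(void) { }
#endif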
Link: https://lkml.kernel.org/r/20220119223940.787748-2-surenb@google.com Fixes: 0e94682b73bf ("psi: introduce psi monitor") Signed-off-by: Suren Baghdasaryan Reported-by: kernel test robot Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/psi.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/psi.h b/include/linux/psi.h index f8ce53bfdb2a..7f7d1d88c3bb 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -25,18 +25,17 @@ void psi_memstall_enter(unsigned long *flags); void psi_memstall_leave(unsigned long *flags); int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res); - -#ifdef CONFIG_CGROUPS -int psi_cgroup_alloc(struct cgroup *cgrp); -void psi_cgroup_free(struct cgroup *cgrp); -void cgroup_move_task(struct task_struct *p, struct css_set *to); - struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf, size_t nbytes, enum psi_res res); void psi_trigger_destroy(struct psi_trigger *t); __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file, poll_table *wait); + +#ifdef CONFIG_CGROUPS +int psi_cgroup_alloc(struct cgroup *cgrp); +void psi_cgroup_free(struct cgroup *cgrp); +void cgroup_move_task(struct task_struct *p, struct css_set *to); #endif #else /* CONFIG_PSI */ -- cgit v1.2.3-71-gd317 From e820a33748b5e22cecafddf919a7d8679949deb1 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 26 Jan 2022 15:53:50 +0200 Subject: math.h: Introduce data types for fractional numbers Introduce a macro to produce data types like struct TYPE_fract { __TYPE numerator; __TYPE denominator; }; to be used in the code wherever it's needed. In the following changes convert some users to it. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220126135353.24007-1-andriy.shevchenko@linux.intel.com Signed-off-by: Jonathan Cameron --- include/linux/math.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/math.h b/include/linux/math.h index 53674a327e39..439b8f0b9ebd 100644 --- a/include/linux/math.h +++ b/include/linux/math.h @@ -2,6 +2,7 @@ #ifndef _LINUX_MATH_H #define _LINUX_MATH_H +#include #include #include @@ -106,6 +107,17 @@ } \ ) +#define __STRUCT_FRACT(type) \ +struct type##_fract { \ + __##type numerator; \ + __##type denominator; \ +}; +__STRUCT_FRACT(s16) +__STRUCT_FRACT(u16) +__STRUCT_FRACT(s32) +__STRUCT_FRACT(u32) +#undef __STRUCT_FRACT + /* * Multiplies an integer by a fraction, while avoiding unnecessary * overflow or loss of precision. -- cgit v1.2.3-71-gd317 From a5e9b2ddbbc789f34be2333263232435d8edf57c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 26 Jan 2022 15:53:53 +0200 Subject: iio: adc: qcom-vadc-common: Re-use generic struct u32_fract Instead of custom data type re-use generic struct u32_fract. No changes intended. 
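Illustration (hypothetical helper, not from the patches): applying a struct u32_fract ratio while multiplying before dividing, to avoid early truncation. Note that the vadc prescale ratios converted in the diff below store an inverse gain, so that code multiplies by the denominator and divides by the numerator; the plain reading is shown here.

/* assumes <linux/math.h> and <linux/math64.h> */
static u64 apply_fract(u64 val, const struct u32_fract *f)
{
	/* val * numerator / denominator, without early truncation */
	return mul_u64_u32_div(val, f->numerator, f->denominator);
}

/* e.g. a 10/81 ratio: apply_fract(8100, &(struct u32_fract){ 10, 81 }) == 1000 */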
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220126135353.24007-4-andriy.shevchenko@linux.intel.com Signed-off-by: Jonathan Cameron --- drivers/iio/adc/qcom-pm8xxx-xoadc.c | 15 +++--- drivers/iio/adc/qcom-spmi-vadc.c | 24 ++++----- drivers/iio/adc/qcom-vadc-common.c | 92 ++++++++++++++++---------------- include/linux/iio/adc/qcom-vadc-common.h | 15 ++---- 4 files changed, 69 insertions(+), 77 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c index 21d7eff645c3..5e9e56821075 100644 --- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c +++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c @@ -175,7 +175,7 @@ struct xoadc_channel { const char *datasheet_name; u8 pre_scale_mux:2; u8 amux_channel:4; - const struct vadc_prescale_ratio prescale; + const struct u32_fract prescale; enum iio_chan_type type; enum vadc_scale_fn_type scale_fn_type; u8 amux_ip_rsv:3; @@ -218,7 +218,9 @@ struct xoadc_variant { .datasheet_name = __stringify(_dname), \ .pre_scale_mux = _presmux, \ .amux_channel = _amux, \ - .prescale = { .num = _prenum, .den = _preden }, \ + .prescale = { \ + .numerator = _prenum, .denominator = _preden, \ + }, \ .type = _type, \ .scale_fn_type = _scale, \ .amux_ip_rsv = _amip, \ @@ -809,12 +811,11 @@ static int pm8xxx_xoadc_parse_channel(struct device *dev, BIT(IIO_CHAN_INFO_PROCESSED); iio_chan->indexed = 1; - dev_dbg(dev, "channel [PRESCALE/MUX: %02x AMUX: %02x] \"%s\" " - "ref voltage: %d, decimation %d " - "prescale %d/%d, scale function %d\n", + dev_dbg(dev, + "channel [PRESCALE/MUX: %02x AMUX: %02x] \"%s\" ref voltage: %d, decimation %d prescale %d/%d, scale function %d\n", hwchan->pre_scale_mux, hwchan->amux_channel, ch->name, - ch->amux_ip_rsv, ch->decimation, hwchan->prescale.num, - hwchan->prescale.den, hwchan->scale_fn_type); + ch->amux_ip_rsv, ch->decimation, hwchan->prescale.numerator, + hwchan->prescale.denominator, hwchan->scale_fn_type); return 0; } diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c index 07b1a99381d9..34202ba52469 100644 --- a/drivers/iio/adc/qcom-spmi-vadc.c +++ b/drivers/iio/adc/qcom-spmi-vadc.c @@ -122,15 +122,15 @@ struct vadc_priv { struct mutex lock; }; -static const struct vadc_prescale_ratio vadc_prescale_ratios[] = { - {.num = 1, .den = 1}, - {.num = 1, .den = 3}, - {.num = 1, .den = 4}, - {.num = 1, .den = 6}, - {.num = 1, .den = 20}, - {.num = 1, .den = 8}, - {.num = 10, .den = 81}, - {.num = 1, .den = 10} +static const struct u32_fract vadc_prescale_ratios[] = { + { .numerator = 1, .denominator = 1 }, + { .numerator = 1, .denominator = 3 }, + { .numerator = 1, .denominator = 4 }, + { .numerator = 1, .denominator = 6 }, + { .numerator = 1, .denominator = 20 }, + { .numerator = 1, .denominator = 8 }, + { .numerator = 10, .denominator = 81 }, + { .numerator = 1, .denominator = 10 }, }; static int vadc_read(struct vadc_priv *vadc, u16 offset, u8 *data) @@ -404,13 +404,13 @@ err: return ret; } -static int vadc_prescaling_from_dt(u32 num, u32 den) +static int vadc_prescaling_from_dt(u32 numerator, u32 denominator) { unsigned int pre; for (pre = 0; pre < ARRAY_SIZE(vadc_prescale_ratios); pre++) - if (vadc_prescale_ratios[pre].num == num && - vadc_prescale_ratios[pre].den == den) + if (vadc_prescale_ratios[pre].numerator == numerator && + vadc_prescale_ratios[pre].denominator == denominator) break; if (pre == ARRAY_SIZE(vadc_prescale_ratios)) diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c index 
14723896aab2..6c6aec848f98 100644 --- a/drivers/iio/adc/qcom-vadc-common.c +++ b/drivers/iio/adc/qcom-vadc-common.c @@ -289,44 +289,44 @@ static const struct vadc_map_pt adcmap7_100k[] = { { 2420, 130048 } }; -static const struct vadc_prescale_ratio adc5_prescale_ratios[] = { - {.num = 1, .den = 1}, - {.num = 1, .den = 3}, - {.num = 1, .den = 4}, - {.num = 1, .den = 6}, - {.num = 1, .den = 20}, - {.num = 1, .den = 8}, - {.num = 10, .den = 81}, - {.num = 1, .den = 10}, - {.num = 1, .den = 16} +static const struct u32_fract adc5_prescale_ratios[] = { + { .numerator = 1, .denominator = 1 }, + { .numerator = 1, .denominator = 3 }, + { .numerator = 1, .denominator = 4 }, + { .numerator = 1, .denominator = 6 }, + { .numerator = 1, .denominator = 20 }, + { .numerator = 1, .denominator = 8 }, + { .numerator = 10, .denominator = 81 }, + { .numerator = 1, .denominator = 10 }, + { .numerator = 1, .denominator = 16 }, }; static int qcom_vadc_scale_hw_calib_volt( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_uv); static int qcom_vadc_scale_hw_calib_therm( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec); static int qcom_vadc7_scale_hw_calib_therm( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec); static int qcom_vadc_scale_hw_smb_temp( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec); static int qcom_vadc_scale_hw_chg5_temp( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec); static int qcom_vadc_scale_hw_calib_die_temp( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec); static int qcom_vadc7_scale_hw_calib_die_temp( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec); @@ -406,7 +406,7 @@ static void qcom_vadc_scale_calib(const struct vadc_linear_graph *calib_graph, } static int qcom_vadc_scale_volt(const struct vadc_linear_graph *calib_graph, - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, bool absolute, u16 adc_code, int *result_uv) { @@ -414,15 +414,15 @@ static int qcom_vadc_scale_volt(const struct vadc_linear_graph *calib_graph, qcom_vadc_scale_calib(calib_graph, adc_code, absolute, &voltage); - voltage = voltage * prescale->den; - result = div64_s64(voltage, prescale->num); + voltage *= prescale->denominator; + result = div64_s64(voltage, prescale->numerator); *result_uv = result; return 0; } static int qcom_vadc_scale_therm(const struct vadc_linear_graph *calib_graph, - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, bool absolute, u16 adc_code, int *result_mdec) { @@ -444,7 +444,7 @@ static int qcom_vadc_scale_therm(const struct vadc_linear_graph *calib_graph, } static int qcom_vadc_scale_die_temp(const struct vadc_linear_graph *calib_graph, - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, bool absolute, u16 adc_code, int *result_mdec) { @@ -454,8 +454,8 @@ static int qcom_vadc_scale_die_temp(const struct vadc_linear_graph *calib_graph, 
qcom_vadc_scale_calib(calib_graph, adc_code, absolute, &voltage); if (voltage > 0) { - temp = voltage * prescale->den; - do_div(temp, prescale->num * 2); + temp = voltage * prescale->denominator; + do_div(temp, prescale->numerator * 2); voltage = temp; } else { voltage = 0; @@ -467,7 +467,7 @@ static int qcom_vadc_scale_die_temp(const struct vadc_linear_graph *calib_graph, } static int qcom_vadc_scale_chg_temp(const struct vadc_linear_graph *calib_graph, - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, bool absolute, u16 adc_code, int *result_mdec) { @@ -475,8 +475,8 @@ static int qcom_vadc_scale_chg_temp(const struct vadc_linear_graph *calib_graph, qcom_vadc_scale_calib(calib_graph, adc_code, absolute, &voltage); - voltage = voltage * prescale->den; - voltage = div64_s64(voltage, prescale->num); + voltage *= prescale->denominator; + voltage = div64_s64(voltage, prescale->numerator); voltage = ((PMI_CHG_SCALE_1) * (voltage * 2)); voltage = (voltage + PMI_CHG_SCALE_2); result = div64_s64(voltage, 1000000); @@ -487,21 +487,21 @@ static int qcom_vadc_scale_chg_temp(const struct vadc_linear_graph *calib_graph, /* convert voltage to ADC code, using 1.875V reference */ static u16 qcom_vadc_scale_voltage_code(s32 voltage, - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const u32 full_scale_code_volt, unsigned int factor) { s64 volt = voltage; s64 adc_vdd_ref_mv = 1875; /* reference voltage */ - volt *= prescale->num * factor * full_scale_code_volt; - volt = div64_s64(volt, (s64)prescale->den * adc_vdd_ref_mv * 1000); + volt *= prescale->numerator * factor * full_scale_code_volt; + volt = div64_s64(volt, (s64)prescale->denominator * adc_vdd_ref_mv * 1000); return volt; } static int qcom_vadc_scale_code_voltage_factor(u16 adc_code, - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, unsigned int factor) { @@ -520,8 +520,8 @@ static int qcom_vadc_scale_code_voltage_factor(u16 adc_code, voltage = (s64) adc_code * adc_vdd_ref_mv * 1000; voltage = div64_s64(voltage, data->full_scale_code_volt); if (voltage > 0) { - voltage *= prescale->den; - temp = prescale->num * factor; + voltage *= prescale->denominator; + temp = prescale->numerator * factor; voltage = div64_s64(voltage, temp); } else { voltage = 0; @@ -531,7 +531,7 @@ static int qcom_vadc_scale_code_voltage_factor(u16 adc_code, } static int qcom_vadc7_scale_hw_calib_therm( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { @@ -557,7 +557,7 @@ static int qcom_vadc7_scale_hw_calib_therm( } static int qcom_vadc_scale_hw_calib_volt( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_uv) { @@ -568,7 +568,7 @@ static int qcom_vadc_scale_hw_calib_volt( } static int qcom_vadc_scale_hw_calib_therm( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { @@ -584,7 +584,7 @@ static int qcom_vadc_scale_hw_calib_therm( } static int qcom_vadc_scale_hw_calib_die_temp( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { @@ -596,7 +596,7 @@ static int qcom_vadc_scale_hw_calib_die_temp( } static int qcom_vadc7_scale_hw_calib_die_temp( - const struct vadc_prescale_ratio *prescale, + 
const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { @@ -611,7 +611,7 @@ static int qcom_vadc7_scale_hw_calib_die_temp( } static int qcom_vadc_scale_hw_smb_temp( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { @@ -623,7 +623,7 @@ static int qcom_vadc_scale_hw_smb_temp( } static int qcom_vadc_scale_hw_chg5_temp( - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result_mdec) { @@ -636,7 +636,7 @@ static int qcom_vadc_scale_hw_chg5_temp( int qcom_vadc_scale(enum vadc_scale_fn_type scaletype, const struct vadc_linear_graph *calib_graph, - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, bool absolute, u16 adc_code, int *result) { @@ -667,7 +667,7 @@ EXPORT_SYMBOL(qcom_vadc_scale); u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio, u32 full_scale_code_volt, int temp) { - const struct vadc_prescale_ratio *prescale = &adc5_prescale_ratios[prescale_ratio]; + const struct u32_fract *prescale = &adc5_prescale_ratios[prescale_ratio]; s32 voltage; voltage = qcom_vadc_map_temp_voltage(adcmap_100k_104ef_104fb_1875_vref, @@ -682,7 +682,7 @@ int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype, const struct adc5_data *data, u16 adc_code, int *result) { - const struct vadc_prescale_ratio *prescale = &adc5_prescale_ratios[prescale_ratio]; + const struct u32_fract *prescale = &adc5_prescale_ratios[prescale_ratio]; if (!(scaletype >= SCALE_HW_CALIB_DEFAULT && scaletype < SCALE_HW_CALIB_INVALID)) { @@ -695,13 +695,13 @@ int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype, } EXPORT_SYMBOL(qcom_adc5_hw_scale); -int qcom_adc5_prescaling_from_dt(u32 num, u32 den) +int qcom_adc5_prescaling_from_dt(u32 numerator, u32 denominator) { unsigned int pre; for (pre = 0; pre < ARRAY_SIZE(adc5_prescale_ratios); pre++) - if (adc5_prescale_ratios[pre].num == num && - adc5_prescale_ratios[pre].den == den) + if (adc5_prescale_ratios[pre].numerator == numerator && + adc5_prescale_ratios[pre].denominator == denominator) break; if (pre == ARRAY_SIZE(adc5_prescale_ratios)) diff --git a/include/linux/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h index 33f60f43e1aa..ce78d4804994 100644 --- a/include/linux/iio/adc/qcom-vadc-common.h +++ b/include/linux/iio/adc/qcom-vadc-common.h @@ -6,6 +6,7 @@ #ifndef QCOM_VADC_COMMON_H #define QCOM_VADC_COMMON_H +#include #include #define VADC_CONV_TIME_MIN_US 2000 @@ -79,16 +80,6 @@ struct vadc_linear_graph { s32 gnd; }; -/** - * struct vadc_prescale_ratio - Represent scaling ratio for ADC input. - * @num: the inverse numerator of the gain applied to the input channel. - * @den: the inverse denominator of the gain applied to the input channel. - */ -struct vadc_prescale_ratio { - u32 num; - u32 den; -}; - /** * enum vadc_scale_fn_type - Scaling function to convert ADC code to * physical scaled units for the channel. 
@@ -144,12 +135,12 @@ struct adc5_data { int qcom_vadc_scale(enum vadc_scale_fn_type scaletype, const struct vadc_linear_graph *calib_graph, - const struct vadc_prescale_ratio *prescale, + const struct u32_fract *prescale, bool absolute, u16 adc_code, int *result_mdec); struct qcom_adc5_scale_type { - int (*scale_fn)(const struct vadc_prescale_ratio *prescale, + int (*scale_fn)(const struct u32_fract *prescale, const struct adc5_data *data, u16 adc_code, int *result); }; -- cgit v1.2.3-71-gd317 From 73c105ad2a3e142d81fc59761ce8353d0b211b8f Mon Sep 17 00:00:00 2001 From: Sergey Shtylyov Date: Fri, 28 Jan 2022 21:32:40 +0300 Subject: phy: make phy_set_max_speed() *void* After following the call tree of phy_set_max_speed(), it became clear that this function never returns anything but 0, so we can change its result type to *void* and drop the result checks from the three drivers that actually bothered to do it... Found by Linux Verification Center (linuxtesting.org) with the SVACE static analysis tool. Signed-off-by: Sergey Shtylyov Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 8 +------- drivers/net/ethernet/renesas/sh_eth.c | 10 ++-------- drivers/net/phy/aquantia_main.c | 4 +--- drivers/net/phy/phy-core.c | 22 ++++++++-------------- include/linux/phy.h | 2 +- 5 files changed, 13 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index b215cde68e10..80366661a361 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1432,11 +1432,7 @@ static int ravb_phy_init(struct net_device *ndev) * at this time. */ if (soc_device_match(r8a7795es10)) { - err = phy_set_max_speed(phydev, SPEED_100); - if (err) { - netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); - goto err_phy_disconnect; - } + phy_set_max_speed(phydev, SPEED_100); netdev_info(ndev, "limited PHY to 100Mbit/s\n"); } @@ -1457,8 +1453,6 @@ static int ravb_phy_init(struct net_device *ndev) return 0; -err_phy_disconnect: - phy_disconnect(phydev); err_deregister_fixed_link: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d947a628e166..8aa91e99227d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2026,14 +2026,8 @@ static int sh_eth_phy_init(struct net_device *ndev) } /* mask with MAC supported features */ - if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) { - int err = phy_set_max_speed(phydev, SPEED_100); - if (err) { - netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n"); - phy_disconnect(phydev); - return err; - } - } + if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) + phy_set_max_speed(phydev, SPEED_100); phy_attached_info(phydev); diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 968dd43a2b1e..a8db1a19011b 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -533,9 +533,7 @@ static int aqcs109_config_init(struct phy_device *phydev) * PMA speed ability bits are the same for all members of the family, * AQCS109 however supports speeds up to 2.5G only. 
*/ - ret = phy_set_max_speed(phydev, SPEED_2500); - if (ret) - return ret; + phy_set_max_speed(phydev, SPEED_2500); return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT); } diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 271fc01f7f7f..2001f3329133 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -243,7 +243,7 @@ size_t phy_speeds(unsigned int *speeds, size_t size, return count; } -static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr) +static void __set_linkmode_max_speed(u32 max_speed, unsigned long *addr) { const struct phy_setting *p; int i; @@ -254,13 +254,11 @@ static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr) else break; } - - return 0; } -static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) +static void __set_phy_supported(struct phy_device *phydev, u32 max_speed) { - return __set_linkmode_max_speed(max_speed, phydev->supported); + __set_linkmode_max_speed(max_speed, phydev->supported); } /** @@ -273,17 +271,11 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) * is connected to a 1G PHY. This function allows the MAC to indicate its * maximum speed, and so limit what the PHY will advertise. */ -int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) +void phy_set_max_speed(struct phy_device *phydev, u32 max_speed) { - int err; - - err = __set_phy_supported(phydev, max_speed); - if (err) - return err; + __set_phy_supported(phydev, max_speed); phy_advertise_supported(phydev); - - return 0; } EXPORT_SYMBOL(phy_set_max_speed); @@ -440,7 +432,9 @@ int phy_speed_down_core(struct phy_device *phydev) if (min_common_speed == SPEED_UNKNOWN) return -EINVAL; - return __set_linkmode_max_speed(min_common_speed, phydev->advertising); + __set_linkmode_max_speed(min_common_speed, phydev->advertising); + + return 0; } static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, diff --git a/include/linux/phy.h b/include/linux/phy.h index 6de8d7a90d78..cd08cf1a8b0d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1661,7 +1661,7 @@ int phy_disable_interrupts(struct phy_device *phydev); void phy_request_interrupt(struct phy_device *phydev); void phy_free_interrupt(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); -int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); +void phy_set_max_speed(struct phy_device *phydev, u32 max_speed); void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); void phy_advertise_supported(struct phy_device *phydev); void phy_support_sym_pause(struct phy_device *phydev); -- cgit v1.2.3-71-gd317 From 13e906e50a8cf6033f22c03c4d772e36a9e02c6b Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 27 Jan 2022 12:01:07 -0800 Subject: component: Replace most references to 'master' with 'aggregate device' Remove most references to 'master' in the code and replace them with some form of 'aggregate device'. This better reflects the reality of what this code does, i.e. an aggregate device that represents a device like a GPU card once some set of devices that make up the aggregate device probe and register with the component framework. Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Laurent Pinchart Cc: "Rafael J. 
Wysocki" Cc: Rob Clark Cc: Russell King Cc: Saravana Kannan Signed-off-by: Stephen Boyd Link: https://lore.kernel.org/r/20220127200141.1295328-2-swboyd@chromium.org Signed-off-by: Greg Kroah-Hartman --- drivers/base/component.c | 242 +++++++++++++++++++++++----------------------- include/linux/component.h | 18 ++-- 2 files changed, 128 insertions(+), 132 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/component.c b/drivers/base/component.c index 2d25a6416587..34f9e0802719 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -1,11 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* * Componentized device handling. - * - * This is work in progress. We gather up the component devices into a list, - * and bind them when instructed. At the moment, we're specific to the DRM - * subsystem, and only handles one master device, but this doesn't have to be - * the case. */ #include #include @@ -57,7 +52,7 @@ struct component_match { struct component_match_array *compare; }; -struct master { +struct aggregate_device { struct list_head node; bool bound; @@ -68,7 +63,7 @@ struct master { struct component { struct list_head node; - struct master *master; + struct aggregate_device *adev; bool bound; const struct component_ops *ops; @@ -78,7 +73,7 @@ struct component { static DEFINE_MUTEX(component_mutex); static LIST_HEAD(component_list); -static LIST_HEAD(masters); +static LIST_HEAD(aggregate_devices); #ifdef CONFIG_DEBUG_FS @@ -86,12 +81,12 @@ static struct dentry *component_debugfs_dir; static int component_devices_show(struct seq_file *s, void *data) { - struct master *m = s->private; + struct aggregate_device *m = s->private; struct component_match *match = m->match; size_t i; mutex_lock(&component_mutex); - seq_printf(s, "%-40s %20s\n", "master name", "status"); + seq_printf(s, "%-40s %20s\n", "aggregate_device name", "status"); seq_puts(s, "-------------------------------------------------------------\n"); seq_printf(s, "%-40s %20s\n\n", dev_name(m->parent), m->bound ? 
"bound" : "not bound"); @@ -121,46 +116,46 @@ static int __init component_debug_init(void) core_initcall(component_debug_init); -static void component_master_debugfs_add(struct master *m) +static void component_debugfs_add(struct aggregate_device *m) { debugfs_create_file(dev_name(m->parent), 0444, component_debugfs_dir, m, &component_devices_fops); } -static void component_master_debugfs_del(struct master *m) +static void component_debugfs_del(struct aggregate_device *m) { debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir)); } #else -static void component_master_debugfs_add(struct master *m) +static void component_debugfs_add(struct aggregate_device *m) { } -static void component_master_debugfs_del(struct master *m) +static void component_debugfs_del(struct aggregate_device *m) { } #endif -static struct master *__master_find(struct device *parent, +static struct aggregate_device *__aggregate_find(struct device *parent, const struct component_master_ops *ops) { - struct master *m; + struct aggregate_device *m; - list_for_each_entry(m, &masters, node) + list_for_each_entry(m, &aggregate_devices, node) if (m->parent == parent && (!ops || m->ops == ops)) return m; return NULL; } -static struct component *find_component(struct master *master, +static struct component *find_component(struct aggregate_device *adev, struct component_match_array *mc) { struct component *c; list_for_each_entry(c, &component_list, node) { - if (c->master && c->master != master) + if (c->adev && c->adev != adev) continue; if (mc->compare && mc->compare(c->dev, mc->data)) @@ -174,102 +169,103 @@ static struct component *find_component(struct master *master, return NULL; } -static int find_components(struct master *master) +static int find_components(struct aggregate_device *adev) { - struct component_match *match = master->match; + struct component_match *match = adev->match; size_t i; int ret = 0; /* * Scan the array of match functions and attach - * any components which are found to this master. + * any components which are found to this adev. */ for (i = 0; i < match->num; i++) { struct component_match_array *mc = &match->compare[i]; struct component *c; - dev_dbg(master->parent, "Looking for component %zu\n", i); + dev_dbg(adev->parent, "Looking for component %zu\n", i); if (match->compare[i].component) continue; - c = find_component(master, mc); + c = find_component(adev, mc); if (!c) { ret = -ENXIO; break; } - dev_dbg(master->parent, "found component %s, duplicate %u\n", dev_name(c->dev), !!c->master); + dev_dbg(adev->parent, "found component %s, duplicate %u\n", + dev_name(c->dev), !!c->adev); - /* Attach this component to the master */ - match->compare[i].duplicate = !!c->master; + /* Attach this component to the adev */ + match->compare[i].duplicate = !!c->adev; match->compare[i].component = c; - c->master = master; + c->adev = adev; } return ret; } -/* Detach component from associated master */ -static void remove_component(struct master *master, struct component *c) +/* Detach component from associated aggregate_device */ +static void remove_component(struct aggregate_device *adev, struct component *c) { size_t i; - /* Detach the component from this master. */ - for (i = 0; i < master->match->num; i++) - if (master->match->compare[i].component == c) - master->match->compare[i].component = NULL; + /* Detach the component from this adev. 
*/ + for (i = 0; i < adev->match->num; i++) + if (adev->match->compare[i].component == c) + adev->match->compare[i].component = NULL; } /* - * Try to bring up a master. If component is NULL, we're interested in - * this master, otherwise it's a component which must be present to try - * and bring up the master. + * Try to bring up an aggregate device. If component is NULL, we're interested + * in this aggregate device, otherwise it's a component which must be present + * to try and bring up the aggregate device. * * Returns 1 for successful bringup, 0 if not ready, or -ve errno. */ -static int try_to_bring_up_master(struct master *master, +static int try_to_bring_up_aggregate_device(struct aggregate_device *adev, struct component *component) { int ret; - dev_dbg(master->parent, "trying to bring up master\n"); + dev_dbg(adev->parent, "trying to bring up adev\n"); - if (find_components(master)) { - dev_dbg(master->parent, "master has incomplete components\n"); + if (find_components(adev)) { + dev_dbg(adev->parent, "master has incomplete components\n"); return 0; } - if (component && component->master != master) { - dev_dbg(master->parent, "master is not for this component (%s)\n", + if (component && component->adev != adev) { + dev_dbg(adev->parent, "master is not for this component (%s)\n", dev_name(component->dev)); return 0; } - if (!devres_open_group(master->parent, master, GFP_KERNEL)) + if (!devres_open_group(adev->parent, adev, GFP_KERNEL)) return -ENOMEM; /* Found all components */ - ret = master->ops->bind(master->parent); + ret = adev->ops->bind(adev->parent); if (ret < 0) { - devres_release_group(master->parent, NULL); + devres_release_group(adev->parent, NULL); if (ret != -EPROBE_DEFER) - dev_info(master->parent, "master bind failed: %d\n", ret); + dev_info(adev->parent, "adev bind failed: %d\n", ret); return ret; } - devres_close_group(master->parent, NULL); - master->bound = true; + devres_close_group(adev->parent, NULL); + adev->bound = true; return 1; } static int try_to_bring_up_masters(struct component *component) { - struct master *m; + struct aggregate_device *adev; int ret = 0; - list_for_each_entry(m, &masters, node) { - if (!m->bound) { - ret = try_to_bring_up_master(m, component); + list_for_each_entry(adev, &aggregate_devices, node) { + if (!adev->bound) { + ret = try_to_bring_up_aggregate_device(adev, component); if (ret != 0) break; } @@ -278,12 +274,12 @@ static int try_to_bring_up_masters(struct component *component) return ret; } -static void take_down_master(struct master *master) +static void take_down_aggregate_device(struct aggregate_device *adev) { - if (master->bound) { - master->ops->unbind(master->parent); - devres_release_group(master->parent, master); - master->bound = false; + if (adev->bound) { + adev->ops->unbind(adev->parent); + devres_release_group(adev->parent, adev); + adev->bound = false; } } @@ -324,7 +320,7 @@ static int component_match_realloc(struct component_match *match, size_t num) return 0; } -static void __component_match_add(struct device *master, +static void __component_match_add(struct device *parent, struct component_match **matchptr, void (*release)(struct device *, void *), int (*compare)(struct device *, void *), @@ -344,7 +340,7 @@ static void __component_match_add(struct device *master, return; } - devres_add(master, match); + devres_add(parent, match); *matchptr = match; } @@ -370,13 +366,13 @@ static void __component_match_add(struct device *master, /** * component_match_add_release - add a component match entry with 
release callback - * @master: device with the aggregate driver + * @parent: parent device of the aggregate driver * @matchptr: pointer to the list of component matches * @release: release function for @compare_data * @compare: compare function to match against all components * @compare_data: opaque pointer passed to the @compare function * - * Adds a new component match to the list stored in @matchptr, which the @master + * Adds a new component match to the list stored in @matchptr, which the * aggregate driver needs to function. The list of component matches pointed to * by @matchptr must be initialized to NULL before adding the first match. This * only matches against components added with component_add(). @@ -388,24 +384,24 @@ static void __component_match_add(struct device *master, * * See also component_match_add() and component_match_add_typed(). */ -void component_match_add_release(struct device *master, +void component_match_add_release(struct device *parent, struct component_match **matchptr, void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data) { - __component_match_add(master, matchptr, release, compare, NULL, + __component_match_add(parent, matchptr, release, compare, NULL, compare_data); } EXPORT_SYMBOL(component_match_add_release); /** * component_match_add_typed - add a component match entry for a typed component - * @master: device with the aggregate driver + * @parent: parent device of the aggregate driver * @matchptr: pointer to the list of component matches * @compare_typed: compare function to match against all typed components * @compare_data: opaque pointer passed to the @compare function * - * Adds a new component match to the list stored in @matchptr, which the @master + * Adds a new component match to the list stored in @matchptr, which the * aggregate driver needs to function. The list of component matches pointed to * by @matchptr must be initialized to NULL before adding the first match. This * only matches against components added with component_add_typed(). @@ -415,32 +411,32 @@ EXPORT_SYMBOL(component_match_add_release); * * See also component_match_add_release() and component_match_add_typed(). 
*/ -void component_match_add_typed(struct device *master, +void component_match_add_typed(struct device *parent, struct component_match **matchptr, int (*compare_typed)(struct device *, int, void *), void *compare_data) { - __component_match_add(master, matchptr, NULL, NULL, compare_typed, + __component_match_add(parent, matchptr, NULL, NULL, compare_typed, compare_data); } EXPORT_SYMBOL(component_match_add_typed); -static void free_master(struct master *master) +static void free_aggregate_device(struct aggregate_device *adev) { - struct component_match *match = master->match; + struct component_match *match = adev->match; int i; - component_master_debugfs_del(master); - list_del(&master->node); + component_debugfs_del(adev); + list_del(&adev->node); if (match) { for (i = 0; i < match->num; i++) { struct component *c = match->compare[i].component; if (c) - c->master = NULL; + c->adev = NULL; } } - kfree(master); + kfree(adev); } /** @@ -459,7 +455,7 @@ int component_master_add_with_match(struct device *parent, const struct component_master_ops *ops, struct component_match *match) { - struct master *master; + struct aggregate_device *adev; int ret; /* Reallocate the match array for its true size */ @@ -467,23 +463,23 @@ int component_master_add_with_match(struct device *parent, if (ret) return ret; - master = kzalloc(sizeof(*master), GFP_KERNEL); - if (!master) + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) return -ENOMEM; - master->parent = parent; - master->ops = ops; - master->match = match; + adev->parent = parent; + adev->ops = ops; + adev->match = match; - component_master_debugfs_add(master); - /* Add to the list of available masters. */ + component_debugfs_add(adev); + /* Add to the list of available aggregate devices. */ mutex_lock(&component_mutex); - list_add(&master->node, &masters); + list_add(&adev->node, &aggregate_devices); - ret = try_to_bring_up_master(master, NULL); + ret = try_to_bring_up_aggregate_device(adev, NULL); if (ret < 0) - free_master(master); + free_aggregate_device(adev); mutex_unlock(&component_mutex); @@ -503,25 +499,25 @@ EXPORT_SYMBOL_GPL(component_master_add_with_match); void component_master_del(struct device *parent, const struct component_master_ops *ops) { - struct master *master; + struct aggregate_device *adev; mutex_lock(&component_mutex); - master = __master_find(parent, ops); - if (master) { - take_down_master(master); - free_master(master); + adev = __aggregate_find(parent, ops); + if (adev) { + take_down_aggregate_device(adev); + free_aggregate_device(adev); } mutex_unlock(&component_mutex); } EXPORT_SYMBOL_GPL(component_master_del); static void component_unbind(struct component *component, - struct master *master, void *data) + struct aggregate_device *adev, void *data) { WARN_ON(!component->bound); if (component->ops && component->ops->unbind) - component->ops->unbind(component->dev, master->parent, data); + component->ops->unbind(component->dev, adev->parent, data); component->bound = false; /* Release all resources claimed in the binding of this component */ @@ -539,26 +535,26 @@ static void component_unbind(struct component *component, */ void component_unbind_all(struct device *parent, void *data) { - struct master *master; + struct aggregate_device *adev; struct component *c; size_t i; WARN_ON(!mutex_is_locked(&component_mutex)); - master = __master_find(parent, NULL); - if (!master) + adev = __aggregate_find(parent, NULL); + if (!adev) return; /* Unbind components in reverse order */ - for (i = master->match->num; i--; ) - 
if (!master->match->compare[i].duplicate) { - c = master->match->compare[i].component; - component_unbind(c, master, data); + for (i = adev->match->num; i--; ) + if (!adev->match->compare[i].duplicate) { + c = adev->match->compare[i].component; + component_unbind(c, adev, data); } } EXPORT_SYMBOL_GPL(component_unbind_all); -static int component_bind(struct component *component, struct master *master, +static int component_bind(struct component *component, struct aggregate_device *adev, void *data) { int ret; @@ -568,7 +564,7 @@ static int component_bind(struct component *component, struct master *master, * This allows us to roll-back a failed component without * affecting anything else. */ - if (!devres_open_group(master->parent, NULL, GFP_KERNEL)) + if (!devres_open_group(adev->parent, NULL, GFP_KERNEL)) return -ENOMEM; /* @@ -577,14 +573,14 @@ static int component_bind(struct component *component, struct master *master, * at the appropriate moment. */ if (!devres_open_group(component->dev, component, GFP_KERNEL)) { - devres_release_group(master->parent, NULL); + devres_release_group(adev->parent, NULL); return -ENOMEM; } - dev_dbg(master->parent, "binding %s (ops %ps)\n", + dev_dbg(adev->parent, "binding %s (ops %ps)\n", dev_name(component->dev), component->ops); - ret = component->ops->bind(component->dev, master->parent, data); + ret = component->ops->bind(component->dev, adev->parent, data); if (!ret) { component->bound = true; @@ -595,16 +591,16 @@ static int component_bind(struct component *component, struct master *master, * can clean those resources up independently. */ devres_close_group(component->dev, NULL); - devres_remove_group(master->parent, NULL); + devres_remove_group(adev->parent, NULL); - dev_info(master->parent, "bound %s (ops %ps)\n", + dev_info(adev->parent, "bound %s (ops %ps)\n", dev_name(component->dev), component->ops); } else { devres_release_group(component->dev, NULL); - devres_release_group(master->parent, NULL); + devres_release_group(adev->parent, NULL); if (ret != -EPROBE_DEFER) - dev_err(master->parent, "failed to bind %s (ops %ps): %d\n", + dev_err(adev->parent, "failed to bind %s (ops %ps): %d\n", dev_name(component->dev), component->ops, ret); } @@ -622,31 +618,31 @@ static int component_bind(struct component *component, struct master *master, */ int component_bind_all(struct device *parent, void *data) { - struct master *master; + struct aggregate_device *adev; struct component *c; size_t i; int ret = 0; WARN_ON(!mutex_is_locked(&component_mutex)); - master = __master_find(parent, NULL); - if (!master) + adev = __aggregate_find(parent, NULL); + if (!adev) return -EINVAL; /* Bind components in match order */ - for (i = 0; i < master->match->num; i++) - if (!master->match->compare[i].duplicate) { - c = master->match->compare[i].component; - ret = component_bind(c, master, data); + for (i = 0; i < adev->match->num; i++) + if (!adev->match->compare[i].duplicate) { + c = adev->match->compare[i].component; + ret = component_bind(c, adev, data); if (ret) break; } if (ret != 0) { for (; i > 0; i--) - if (!master->match->compare[i - 1].duplicate) { - c = master->match->compare[i - 1].component; - component_unbind(c, master, data); + if (!adev->match->compare[i - 1].duplicate) { + c = adev->match->compare[i - 1].component; + component_unbind(c, adev, data); } } @@ -675,8 +671,8 @@ static int __component_add(struct device *dev, const struct component_ops *ops, ret = try_to_bring_up_masters(component); if (ret < 0) { - if (component->master) - 
remove_component(component->master, component); + if (component->adev) + remove_component(component->adev, component); list_del(&component->node); kfree(component); @@ -757,9 +753,9 @@ void component_del(struct device *dev, const struct component_ops *ops) break; } - if (component && component->master) { - take_down_master(component->master); - remove_component(component->master, component); + if (component && component->adev) { + take_down_aggregate_device(component->adev); + remove_component(component->adev, component); } mutex_unlock(&component_mutex); diff --git a/include/linux/component.h b/include/linux/component.h index 16de18f473d7..7012569c6546 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -38,10 +38,10 @@ int component_add_typed(struct device *dev, const struct component_ops *ops, int subcomponent); void component_del(struct device *, const struct component_ops *); -int component_bind_all(struct device *master, void *master_data); -void component_unbind_all(struct device *master, void *master_data); +int component_bind_all(struct device *parent, void *data); +void component_unbind_all(struct device *parent, void *data); -struct master; +struct aggregate_device; /** * struct component_master_ops - callback for the aggregate driver @@ -89,22 +89,22 @@ struct component_match; int component_master_add_with_match(struct device *, const struct component_master_ops *, struct component_match *); -void component_match_add_release(struct device *master, +void component_match_add_release(struct device *parent, struct component_match **matchptr, void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data); -void component_match_add_typed(struct device *master, +void component_match_add_typed(struct device *parent, struct component_match **matchptr, int (*compare_typed)(struct device *, int, void *), void *compare_data); /** * component_match_add - add a component match entry - * @master: device with the aggregate driver + * @parent: device with the aggregate driver * @matchptr: pointer to the list of component matches * @compare: compare function to match against all components * @compare_data: opaque pointer passed to the @compare function * - * Adds a new component match to the list stored in @matchptr, which the @master + * Adds a new component match to the list stored in @matchptr, which the @parent * aggregate driver needs to function. The list of component matches pointed to * by @matchptr must be initialized to NULL before adding the first match. This * only matches against components added with component_add(). @@ -114,11 +114,11 @@ void component_match_add_typed(struct device *master, * * See also component_match_add_release() and component_match_add_typed(). */ -static inline void component_match_add(struct device *master, +static inline void component_match_add(struct device *parent, struct component_match **matchptr, int (*compare)(struct device *, void *), void *compare_data) { - component_match_add_release(master, matchptr, NULL, compare, + component_match_add_release(parent, matchptr, NULL, compare, compare_data); } -- cgit v1.2.3-71-gd317 From 31455bbda2081af83f72bb4636348b12b82c37c1 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 25 Jan 2022 01:58:36 +0100 Subject: spi: pxa2xx_spi: Convert to use GPIO descriptors This converts the PXA2xx SPI driver to use GPIO descriptors exclusively to retrieve GPIO chip select lines. 
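For illustration, the conversion applied to each board below replaces the per-chip .gpio_cs platform data with a GPIO lookup table that the SPI core consumes; this sketch uses a hypothetical EXAMPLE_GPIO_CS and bus number rather than any specific board's values:

    /* before: chip select carried in pxa2xx_spi_chip platform data */
    static struct pxa2xx_spi_chip example_chip = {
    	.gpio_cs	= EXAMPLE_GPIO_CS,	/* field removed by this patch */
    };

    /* after: chip select described to the SPI core as a "cs" GPIO */
    static struct gpiod_lookup_table example_spi_gpio_table = {
    	.dev_id = "pxa2xx-spi.1",	/* "pxa2xx-spi." + SSP/bus number */
    	.table = {
    		GPIO_LOOKUP_IDX("gpio-pxa", EXAMPLE_GPIO_CS, "cs", 0, GPIO_ACTIVE_LOW),
    		{ },
    	},
    };

Each board init then registers the table with gpiod_add_lookup_table() before calling pxa2xx_set_spi_info(), as the diffs below show.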
The device tree and ACPI paths of the driver already use descriptors, hence ->use_gpio_descriptors is already set and this codepath is well tested. Convert all the PXA boards providing chip select GPIOs as platform data and drop the old GPIO chipselect handling in favor of the core managing it exclusively. Cc: Marek Vasut Cc: Daniel Mack Cc: Haojian Zhuang Cc: Robert Jarzmik Cc: linux-arm-kernel@lists.infradead.org Acked-by: Jonathan Cameron Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20220125005836.494807-1-linus.walleij@linaro.org Signed-off-by: Mark Brown --- Documentation/spi/pxa2xx.rst | 3 -- arch/arm/mach-pxa/corgi.c | 26 ++++++++--------- arch/arm/mach-pxa/hx4700.c | 10 ++++++- arch/arm/mach-pxa/icontrol.c | 26 +++++++++++++---- arch/arm/mach-pxa/littleton.c | 10 ++++++- arch/arm/mach-pxa/magician.c | 12 ++++++-- arch/arm/mach-pxa/poodle.c | 14 ++++++---- arch/arm/mach-pxa/spitz.c | 26 ++++++++--------- arch/arm/mach-pxa/stargate2.c | 20 ++++++++++++-- arch/arm/mach-pxa/z2.c | 20 ++++++++++++-- drivers/spi/spi-pxa2xx.c | 63 +----------------------------------------- include/linux/spi/pxa2xx_spi.h | 1 - 12 files changed, 117 insertions(+), 114 deletions(-) (limited to 'include/linux') diff --git a/Documentation/spi/pxa2xx.rst b/Documentation/spi/pxa2xx.rst index 6347580826be..716f65d87d04 100644 --- a/Documentation/spi/pxa2xx.rst +++ b/Documentation/spi/pxa2xx.rst @@ -101,7 +101,6 @@ device. All fields are optional. u8 rx_threshold; u8 dma_burst_size; u32 timeout; - int gpio_cs; }; The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are @@ -146,7 +145,6 @@ field. Below is a sample configuration using the PXA255 NSSP. .rx_threshold = 8, /* SSP hardward FIFO threshold */ .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */ .timeout = 235, /* See Intel documentation */ - .gpio_cs = 2, /* Use external chip select */ }; static struct pxa2xx_spi_chip cs8405a_chip_info = { @@ -154,7 +152,6 @@ field. Below is a sample configuration using the PXA255 NSSP. 
.rx_threshold = 8, /* SSP hardward FIFO threshold */ .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */ .timeout = 235, /* See Intel documentation */ - .gpio_cs = 3, /* Use external chip select */ }; static struct spi_board_info streetracer_spi_board_info[] __initdata = { diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index 593c7f793da5..44659fbc37ba 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -530,6 +530,16 @@ static struct pxa2xx_spi_controller corgi_spi_info = { .num_chipselect = 3, }; +static struct gpiod_lookup_table corgi_spi_gpio_table = { + .dev_id = "pxa2xx-spi.1", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_MAX1111_CS, "cs", 2, GPIO_ACTIVE_LOW), + { }, + }, +}; + static void corgi_wait_for_hsync(void) { while (gpio_get_value(CORGI_GPIO_HSYNC)) @@ -548,10 +558,6 @@ static struct ads7846_platform_data corgi_ads7846_info = { .wait_for_sync = corgi_wait_for_hsync, }; -static struct pxa2xx_spi_chip corgi_ads7846_chip = { - .gpio_cs = CORGI_GPIO_ADS7846_CS, -}; - static void corgi_bl_kick_battery(void) { void (*kick_batt)(void); @@ -580,14 +586,6 @@ static struct corgi_lcd_platform_data corgi_lcdcon_info = { .kick_battery = corgi_bl_kick_battery, }; -static struct pxa2xx_spi_chip corgi_lcdcon_chip = { - .gpio_cs = CORGI_GPIO_LCDCON_CS, -}; - -static struct pxa2xx_spi_chip corgi_max1111_chip = { - .gpio_cs = CORGI_GPIO_MAX1111_CS, -}; - static struct spi_board_info corgi_spi_devices[] = { { .modalias = "ads7846", @@ -595,7 +593,6 @@ static struct spi_board_info corgi_spi_devices[] = { .bus_num = 1, .chip_select = 0, .platform_data = &corgi_ads7846_info, - .controller_data= &corgi_ads7846_chip, .irq = PXA_GPIO_TO_IRQ(CORGI_GPIO_TP_INT), }, { .modalias = "corgi-lcd", @@ -603,18 +600,17 @@ static struct spi_board_info corgi_spi_devices[] = { .bus_num = 1, .chip_select = 1, .platform_data = &corgi_lcdcon_info, - .controller_data= &corgi_lcdcon_chip, }, { .modalias = "max1111", .max_speed_hz = 450000, .bus_num = 1, .chip_select = 2, - .controller_data= &corgi_max1111_chip, }, }; static void __init corgi_init_spi(void) { + gpiod_add_lookup_table(&corgi_spi_gpio_table); pxa2xx_set_spi_info(1, &corgi_spi_info); gpiod_add_lookup_table(&corgi_lcdcon_gpio_table); spi_register_board_info(ARRAY_AND_SIZE(corgi_spi_devices)); diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index 1d4c5db54be2..e1870fbb19e7 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -616,7 +616,6 @@ static struct pxa2xx_spi_chip tsc2046_chip = { .tx_threshold = 1, .rx_threshold = 2, .timeout = 64, - .gpio_cs = GPIO88_HX4700_TSC2046_CS, }; static struct spi_board_info tsc2046_board_info[] __initdata = { @@ -635,6 +634,14 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = { .enable_dma = 1, }; +static struct gpiod_lookup_table pxa_ssp2_gpio_table = { + .dev_id = "pxa2xx-spi.2", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_HX4700_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, +}; + /* * External power */ @@ -896,6 +903,7 @@ static void __init hx4700_init(void) pxa_set_i2c_info(NULL); i2c_register_board_info(0, ARRAY_AND_SIZE(i2c_board_info)); i2c_register_board_info(1, ARRAY_AND_SIZE(pi2c_board_info)); + gpiod_add_lookup_table(&pxa_ssp2_gpio_table); pxa2xx_set_spi_info(2, &pxa_ssp2_master_info); 
spi_register_board_info(ARRAY_AND_SIZE(tsc2046_board_info)); diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c index 04a12523cdee..753fe166ab68 100644 --- a/arch/arm/mach-pxa/icontrol.c +++ b/arch/arm/mach-pxa/icontrol.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include @@ -42,7 +42,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info1 = { .rx_threshold = 128, .dma_burst_size = 8, .timeout = 235, - .gpio_cs = ICONTROL_MCP251x_nCS1 }; static struct pxa2xx_spi_chip mcp251x_chip_info2 = { @@ -50,7 +49,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info2 = { .rx_threshold = 128, .dma_burst_size = 8, .timeout = 235, - .gpio_cs = ICONTROL_MCP251x_nCS2 }; static struct pxa2xx_spi_chip mcp251x_chip_info3 = { @@ -58,7 +56,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info3 = { .rx_threshold = 128, .dma_burst_size = 8, .timeout = 235, - .gpio_cs = ICONTROL_MCP251x_nCS3 }; static struct pxa2xx_spi_chip mcp251x_chip_info4 = { @@ -66,7 +63,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = { .rx_threshold = 128, .dma_burst_size = 8, .timeout = 235, - .gpio_cs = ICONTROL_MCP251x_nCS4 }; static const struct property_entry mcp251x_properties[] = { @@ -143,6 +139,24 @@ struct platform_device pxa_spi_ssp4 = { } }; +static struct gpiod_lookup_table pxa_ssp3_gpio_table = { + .dev_id = "pxa2xx-spi.3", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS1, "cs", 0, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS2, "cs", 1, GPIO_ACTIVE_LOW), + { }, + }, +}; + +static struct gpiod_lookup_table pxa_ssp4_gpio_table = { + .dev_id = "pxa2xx-spi.4", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS3, "cs", 0, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS4, "cs", 1, GPIO_ACTIVE_LOW), + { }, + }, +}; + static struct platform_device *icontrol_spi_devices[] __initdata = { &pxa_spi_ssp3, &pxa_spi_ssp4, @@ -175,6 +189,8 @@ static mfp_cfg_t mfp_can_cfg[] __initdata = { static void __init icontrol_can_init(void) { pxa3xx_mfp_config(ARRAY_AND_SIZE(mfp_can_cfg)); + gpiod_add_lookup_table(&pxa_ssp3_gpio_table); + gpiod_add_lookup_table(&pxa_ssp4_gpio_table); platform_add_devices(ARRAY_AND_SIZE(icontrol_spi_devices)); spi_register_board_info(ARRAY_AND_SIZE(mcp251x_board_info)); } diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c index 793f61375ee8..73f5953b3bb6 100644 --- a/arch/arm/mach-pxa/littleton.c +++ b/arch/arm/mach-pxa/littleton.c @@ -195,7 +195,6 @@ static struct pxa2xx_spi_controller littleton_spi_info = { static struct pxa2xx_spi_chip littleton_tdo24m_chip = { .rx_threshold = 1, .tx_threshold = 1, - .gpio_cs = LITTLETON_GPIO_LCD_CS, }; static struct spi_board_info littleton_spi_devices[] __initdata = { @@ -208,8 +207,17 @@ static struct spi_board_info littleton_spi_devices[] __initdata = { }, }; +static struct gpiod_lookup_table littleton_spi_gpio_table = { + .dev_id = "pxa2xx-spi.2", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", LITTLETON_GPIO_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, +}; + static void __init littleton_init_spi(void) { + gpiod_add_lookup_table(&littleton_spi_gpio_table); pxa2xx_set_spi_info(2, &littleton_spi_info); spi_register_board_info(ARRAY_AND_SIZE(littleton_spi_devices)); } diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index cd9fa465b9b2..200fd35168e0 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -938,8 +938,6 @@ struct pxa2xx_spi_chip tsc2046_chip_info = { .tx_threshold = 1, 
.rx_threshold = 2, .timeout = 64, - /* NOTICE must be GPIO, incompatibility with hw PXA SPI framing */ - .gpio_cs = GPIO14_MAGICIAN_TSC2046_CS, }; static struct pxa2xx_spi_controller magician_spi_info = { @@ -947,6 +945,15 @@ static struct pxa2xx_spi_controller magician_spi_info = { .enable_dma = 1, }; +static struct gpiod_lookup_table magician_spi_gpio_table = { + .dev_id = "pxa2xx-spi.2", + .table = { + /* NOTICE must be GPIO, incompatibility with hw PXA SPI framing */ + GPIO_LOOKUP_IDX("gpio-pxa", GPIO14_MAGICIAN_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, +}; + static struct spi_board_info ads7846_spi_board_info[] __initdata = { { .modalias = "ads7846", @@ -1031,6 +1038,7 @@ static void __init magician_init(void) } else pr_err("LCD detection: CPLD mapping failed\n"); + gpiod_add_lookup_table(&magician_spi_gpio_table); pxa2xx_set_spi_info(2, &magician_spi_info); spi_register_board_info(ARRAY_AND_SIZE(ads7846_spi_board_info)); diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index 3a4ecc3c8f8b..58cfa434afde 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c @@ -197,6 +197,14 @@ static struct pxa2xx_spi_controller poodle_spi_info = { .num_chipselect = 1, }; +static struct gpiod_lookup_table poodle_spi_gpio_table = { + .dev_id = "pxa2xx-spi.1", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", POODLE_GPIO_TP_CS, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, +}; + static struct ads7846_platform_data poodle_ads7846_info = { .model = 7846, .vref_delay_usecs = 100, @@ -205,23 +213,19 @@ static struct ads7846_platform_data poodle_ads7846_info = { .gpio_pendown = POODLE_GPIO_TP_INT, }; -static struct pxa2xx_spi_chip poodle_ads7846_chip = { - .gpio_cs = POODLE_GPIO_TP_CS, -}; - static struct spi_board_info poodle_spi_devices[] = { { .modalias = "ads7846", .max_speed_hz = 10000, .bus_num = 1, .platform_data = &poodle_ads7846_info, - .controller_data= &poodle_ads7846_chip, .irq = PXA_GPIO_TO_IRQ(POODLE_GPIO_TP_INT), }, }; static void __init poodle_init_spi(void) { + gpiod_add_lookup_table(&poodle_spi_gpio_table); pxa2xx_set_spi_info(1, &poodle_spi_info); spi_register_board_info(ARRAY_AND_SIZE(poodle_spi_devices)); } diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 371008e9bb02..a648e7094e84 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -510,10 +510,6 @@ static struct ads7846_platform_data spitz_ads7846_info = { .wait_for_sync = spitz_ads7846_wait_for_hsync, }; -static struct pxa2xx_spi_chip spitz_ads7846_chip = { - .gpio_cs = SPITZ_GPIO_ADS7846_CS, -}; - static void spitz_bl_kick_battery(void) { void (*kick_batt)(void); @@ -555,14 +551,6 @@ static struct corgi_lcd_platform_data spitz_lcdcon_info = { .kick_battery = spitz_bl_kick_battery, }; -static struct pxa2xx_spi_chip spitz_lcdcon_chip = { - .gpio_cs = SPITZ_GPIO_LCDCON_CS, -}; - -static struct pxa2xx_spi_chip spitz_max1111_chip = { - .gpio_cs = SPITZ_GPIO_MAX1111_CS, -}; - static struct spi_board_info spitz_spi_devices[] = { { .modalias = "ads7846", @@ -570,7 +558,6 @@ static struct spi_board_info spitz_spi_devices[] = { .bus_num = 2, .chip_select = 0, .platform_data = &spitz_ads7846_info, - .controller_data = &spitz_ads7846_chip, .irq = PXA_GPIO_TO_IRQ(SPITZ_GPIO_TP_INT), }, { .modalias = "corgi-lcd", @@ -578,13 +565,11 @@ static struct spi_board_info spitz_spi_devices[] = { .bus_num = 2, .chip_select = 1, .platform_data = &spitz_lcdcon_info, - .controller_data = &spitz_lcdcon_chip, }, { .modalias = "max1111", .max_speed_hz = 450000, .bus_num = 2, 
.chip_select = 2, - .controller_data = &spitz_max1111_chip, }, }; @@ -592,6 +577,16 @@ static struct pxa2xx_spi_controller spitz_spi_info = { .num_chipselect = 3, }; +static struct gpiod_lookup_table spitz_spi_gpio_table = { + .dev_id = "pxa2xx-spi.2", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_MAX1111_CS, "cs", 2, GPIO_ACTIVE_LOW), + { }, + }, +}; + static void __init spitz_spi_init(void) { if (machine_is_akita()) @@ -599,6 +594,7 @@ static void __init spitz_spi_init(void) else gpiod_add_lookup_table(&spitz_lcdcon_gpio_table); + gpiod_add_lookup_table(&spitz_spi_gpio_table); pxa2xx_set_spi_info(2, &spitz_spi_info); spi_register_board_info(ARRAY_AND_SIZE(spitz_spi_devices)); } diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c index 8ca02ec1d44c..b43e2f4536a5 100644 --- a/arch/arm/mach-pxa/stargate2.c +++ b/arch/arm/mach-pxa/stargate2.c @@ -346,6 +346,22 @@ static struct pxa2xx_spi_controller pxa_ssp_master_2_info = { .num_chipselect = 1, }; +static struct gpiod_lookup_table pxa_ssp1_gpio_table = { + .dev_id = "pxa2xx-spi.1", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", 24, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, +}; + +static struct gpiod_lookup_table pxa_ssp3_gpio_table = { + .dev_id = "pxa2xx-spi.3", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", 39, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, +}; + /* An upcoming kernel change will scrap SFRM usage so these * drivers have been moved to use GPIOs */ static struct pxa2xx_spi_chip staccel_chip_info = { @@ -353,7 +369,6 @@ static struct pxa2xx_spi_chip staccel_chip_info = { .rx_threshold = 8, .dma_burst_size = 8, .timeout = 235, - .gpio_cs = 24, }; static struct pxa2xx_spi_chip cc2420_info = { @@ -361,7 +376,6 @@ static struct pxa2xx_spi_chip cc2420_info = { .rx_threshold = 8, .dma_burst_size = 8, .timeout = 235, - .gpio_cs = 39, }; static struct spi_board_info spi_board_info[] __initdata = { @@ -410,6 +424,8 @@ static void __init imote2_stargate2_init(void) pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); + gpiod_add_lookup_table(&pxa_ssp1_gpio_table); + gpiod_add_lookup_table(&pxa_ssp3_gpio_table); pxa2xx_set_spi_info(1, &pxa_ssp_master_0_info); pxa2xx_set_spi_info(2, &pxa_ssp_master_1_info); pxa2xx_set_spi_info(3, &pxa_ssp_master_2_info); diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c index 8e74fbb0a96e..7eaeda269927 100644 --- a/arch/arm/mach-pxa/z2.c +++ b/arch/arm/mach-pxa/z2.c @@ -570,7 +570,6 @@ static struct pxa2xx_spi_chip z2_lbs_chip_info = { .rx_threshold = 8, .tx_threshold = 8, .timeout = 1000, - .gpio_cs = GPIO24_ZIPITZ2_WIFI_CS, }; static struct libertas_spi_platform_data z2_lbs_pdata = { @@ -584,7 +583,6 @@ static struct pxa2xx_spi_chip lms283_chip_info = { .rx_threshold = 1, .tx_threshold = 1, .timeout = 64, - .gpio_cs = GPIO88_ZIPITZ2_LCD_CS, }; static struct gpiod_lookup_table lms283_gpio_table = { @@ -624,8 +622,26 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = { .num_chipselect = 1, }; +static struct gpiod_lookup_table pxa_ssp1_gpio_table = { + .dev_id = "pxa2xx-spi.1", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", GPIO24_ZIPITZ2_WIFI_CS, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, +}; + +static struct gpiod_lookup_table pxa_ssp2_gpio_table = { + .dev_id = "pxa2xx-spi.2", + .table = { + GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_ZIPITZ2_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, +}; + static void __init 
z2_spi_init(void) { + gpiod_add_lookup_table(&pxa_ssp1_gpio_table); + gpiod_add_lookup_table(&pxa_ssp2_gpio_table); pxa2xx_set_spi_info(1, &pxa_ssp1_master_info); pxa2xx_set_spi_info(2, &pxa_ssp2_master_info); gpiod_add_lookup_table(&lms283_gpio_table); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index e88f86274eeb..abb9f0ffd377 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -1163,57 +1162,6 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller) return 0; } -static void cleanup_cs(struct spi_device *spi) -{ - if (!gpio_is_valid(spi->cs_gpio)) - return; - - gpio_free(spi->cs_gpio); - spi->cs_gpio = -ENOENT; -} - -static int setup_cs(struct spi_device *spi, struct chip_data *chip, - struct pxa2xx_spi_chip *chip_info) -{ - struct driver_data *drv_data = spi_controller_get_devdata(spi->controller); - - if (chip == NULL) - return 0; - - if (chip_info == NULL) - return 0; - - if (drv_data->ssp_type == CE4100_SSP) - return 0; - - /* - * NOTE: setup() can be called multiple times, possibly with - * different chip_info, release previously requested GPIO. - */ - cleanup_cs(spi); - - if (gpio_is_valid(chip_info->gpio_cs)) { - int gpio = chip_info->gpio_cs; - int err; - - err = gpio_request(gpio, "SPI_CS"); - if (err) { - dev_err(&spi->dev, "failed to request chip select GPIO%d\n", gpio); - return err; - } - - err = gpio_direction_output(gpio, !(spi->mode & SPI_CS_HIGH)); - if (err) { - gpio_free(gpio); - return err; - } - - spi->cs_gpio = gpio; - } - - return 0; -} - static int setup(struct spi_device *spi) { struct pxa2xx_spi_chip *chip_info; @@ -1222,7 +1170,6 @@ static int setup(struct spi_device *spi) struct driver_data *drv_data = spi_controller_get_devdata(spi->controller); uint tx_thres, tx_hi_thres, rx_thres; - int err; switch (drv_data->ssp_type) { case QUARK_X1000_SSP: @@ -1365,21 +1312,13 @@ static int setup(struct spi_device *spi) spi_set_ctldata(spi, chip); - if (drv_data->ssp_type == CE4100_SSP) - return 0; - - err = setup_cs(spi, chip, chip_info); - if (err) - kfree(chip); - - return err; + return 0; } static void cleanup(struct spi_device *spi) { struct chip_data *chip = spi_get_ctldata(spi); - cleanup_cs(spi); kfree(chip); } diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index ca74dce36706..4658e7801b42 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -42,7 +42,6 @@ struct pxa2xx_spi_chip { u8 rx_threshold; u8 dma_burst_size; u32 timeout; - int gpio_cs; }; #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) -- cgit v1.2.3-71-gd317 From d80976d9ffd9d7f89a26134a299b236910477f3b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 30 Nov 2021 16:27:55 +0100 Subject: dma-resv: some doc polish for iterators MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hammer it a bit more in that iterators can be restarted and when that matters, plus suggest to prefer the locked version whenver. Also delete the two leftover kerneldoc for static functions plus sprinkle some more links while at it. 
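As a hedged illustration of the usage pattern these docs describe (obj stands for some struct dma_resv pointer; any accumulated state must be discarded when the iterator restarts):

    struct dma_resv_iter cursor;
    struct dma_fence *fence;
    unsigned int count = 0;

    dma_resv_iter_begin(&cursor, obj, true);	/* true: also walk shared fences */
    dma_resv_for_each_fence_unlocked(&cursor, fence) {
    	if (dma_resv_iter_is_restarted(&cursor))
    		count = 0;	/* a concurrent modify invalidated the stats */
    	count++;
    }
    dma_resv_iter_end(&cursor);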
v2: Keep some comments (Christian) Reviewed-by: Christian König Signed-off-by: Daniel Vetter Cc: Sumit Semwal Cc: "Christian König" Cc: linux-media@vger.kernel.org Cc: linaro-mm-sig@lists.linaro.org Link: https://patchwork.freedesktop.org/patch/msgid/20211130152756.1388106-1-daniel.vetter@ffwll.ch --- drivers/dma-buf/dma-resv.c | 29 +++++++++++++++-------------- include/linux/dma-resv.h | 13 ++++++++++++- 2 files changed, 27 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 6dd9a40b55d4..ee31f15d633a 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -323,12 +323,8 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) } EXPORT_SYMBOL(dma_resv_add_excl_fence); -/** - * dma_resv_iter_restart_unlocked - restart the unlocked iterator - * @cursor: The dma_resv_iter object to restart - * - * Restart the unlocked iteration by initializing the cursor object. - */ +/* Restart the iterator by initializing all the necessary fields, but not the + * relation to the dma_resv object. */ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) { cursor->seq = read_seqcount_begin(&cursor->obj->seq); @@ -344,14 +340,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) cursor->is_restarted = true; } -/** - * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj - * @cursor: cursor to record the current position - * - * Return all the fences in the dma_resv object which are not yet signaled. - * The returned fence has an extra local reference so will stay alive. - * If a concurrent modify is detected the whole iteration is started over again. - */ +/* Walk to the next not signaled fence and grab a reference to it */ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) { struct dma_resv *obj = cursor->obj; @@ -387,6 +376,12 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj. * @cursor: the cursor with the current position * + * Subsequent fences are iterated with dma_resv_iter_next_unlocked(). + * + * Beware that the iterator can be restarted. Code which accumulates statistics + * or similar needs to check for this with dma_resv_iter_is_restarted(). For + * this reason prefer the locked dma_resv_iter_first() whenver possible. + * * Returns the first fence from an unlocked dma_resv obj. */ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor) @@ -406,6 +401,10 @@ EXPORT_SYMBOL(dma_resv_iter_first_unlocked); * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj. * @cursor: the cursor with the current position * + * Beware that the iterator can be restarted. Code which accumulates statistics + * or similar needs to check for this with dma_resv_iter_is_restarted(). For + * this reason prefer the locked dma_resv_iter_next() whenver possible. + * * Returns the next fence from an unlocked dma_resv obj. */ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor) @@ -431,6 +430,8 @@ EXPORT_SYMBOL(dma_resv_iter_next_unlocked); * dma_resv_iter_first - first fence from a locked dma_resv object * @cursor: cursor to record the current position * + * Subsequent fences are iterated with dma_resv_iter_next_unlocked(). + * * Return the first fence in the dma_resv object while holding the * &dma_resv.lock. 
*/ diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index a715df97b31a..afdfdfac729f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -153,6 +153,13 @@ struct dma_resv { * struct dma_resv_iter - current position into the dma_resv fences * * Don't touch this directly in the driver, use the accessor function instead. + * + * IMPORTANT + * + * When using the lockless iterators like dma_resv_iter_next_unlocked() or + * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted. + * Code which accumulates statistics or similar needs to check for this with + * dma_resv_iter_is_restarted(). */ struct dma_resv_iter { /** @obj: The dma_resv object we iterate over */ @@ -243,7 +250,11 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) * &dma_resv.lock and using RCU instead. The cursor needs to be initialized * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside * the iterator a reference to the dma_fence is held and the RCU lock dropped. - * When the dma_resv is modified the iteration starts over again. + * + * Beware that the iterator can be restarted when the struct dma_resv for + * @cursor is modified. Code which accumulates statistics or similar needs to + * check for this with dma_resv_iter_is_restarted(). For this reason prefer the + * lock iterator dma_resv_for_each_fence() whenever possible. */ #define dma_resv_for_each_fence_unlocked(cursor, fence) \ for (fence = dma_resv_iter_first_unlocked(cursor); \ -- cgit v1.2.3-71-gd317 From 943515090ec67f81f6f93febfddb8c9118357e97 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Wed, 8 Dec 2021 09:34:22 +0100 Subject: firmware: qcom: scm: Add function to set the maximum IOMMU pool size This is not necessary for basic functionality of the IOMMU, but it's an optimization that tells the TZ the maximum mappable size for the secure IOMMUs, so that it can optimize the data structures in the TZ itself.
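A minimal caller sketch; pairing the new call with the existing qcom_scm_iommu_secure_ptbl_size() helper is an assumption for illustration, not something this patch mandates:

    size_t psize;
    int ret;

    ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
    if (!ret)
    	ret = qcom_scm_iommu_set_cp_pool_size(0, psize);	/* spare = 0 */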
Signed-off-by: AngeloGioacchino Del Regno [Marijn: ported from 5.3 to the unified architecture in 5.11] Signed-off-by: Marijn Suijten Reviewed-by: Konrad Dybcio Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211208083423.22037-3-marijn.suijten@somainline.org --- drivers/firmware/qcom_scm.c | 15 +++++++++++++++ drivers/firmware/qcom_scm.h | 1 + include/linux/qcom_scm.h | 1 + 3 files changed, 17 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 3f67bf774821..d5a9ba15e2ba 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -759,6 +759,21 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) } EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); +int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = size, + .args[1] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size); + int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, u32 cp_nonpixel_start, u32 cp_nonpixel_size) diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index d92156ceb3ac..bb627941702b 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -100,6 +100,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02 #define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03 #define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04 +#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05 #define QCOM_SCM_MP_VIDEO_VAR 0x08 #define QCOM_SCM_MP_ASSIGN 0x16 diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 81cad9e1e412..8a065f8660c1 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -83,6 +83,7 @@ extern bool qcom_scm_restore_sec_cfg_available(void); extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); +extern int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size); extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, u32 cp_nonpixel_start, u32 cp_nonpixel_size); -- cgit v1.2.3-71-gd317 From 071a13332de894cb3c38b17c82350f1e4167c023 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Wed, 8 Dec 2021 09:34:23 +0100 Subject: firmware: qcom: scm: Add function to set IOMMU pagetable addressing Add a function to change the IOMMU pagetable addressing to AArch32 LPAE or AArch64. If doing that, then this must be done for each IOMMU context (not necessarily at the same time). 
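A hedged sketch of the per-context usage described above; sec_id and num_ctx are hypothetical values a secure-IOMMU driver would know:

    int ctx, ret;

    for (ctx = 0; ctx < num_ctx; ctx++) {
    	/* pt_fmt: 0 = LPAE AArch32, 1 = AArch64, per the call's comment */
    	ret = qcom_scm_iommu_set_pt_format(sec_id, ctx, 1);
    	if (ret)
    		break;
    }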
Signed-off-by: AngeloGioacchino Del Regno [Marijn: ported from 5.3 to the unified architecture in 5.11] Signed-off-by: Marijn Suijten Reviewed-by: Konrad Dybcio Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211208083423.22037-4-marijn.suijten@somainline.org --- drivers/firmware/qcom_scm.c | 16 ++++++++++++++++ drivers/firmware/qcom_scm.h | 1 + include/linux/qcom_scm.h | 1 + 3 files changed, 18 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index d5a9ba15e2ba..6f7096120023 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -1140,6 +1140,22 @@ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) } EXPORT_SYMBOL(qcom_scm_hdcp_req); +int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_SMMU_PROGRAM, + .cmd = QCOM_SCM_SMMU_PT_FORMAT, + .arginfo = QCOM_SCM_ARGS(3), + .args[0] = sec_id, + .args[1] = ctx_num, + .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */ + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format); + int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { struct qcom_scm_desc desc = { diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index bb627941702b..a348f2c214e5 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -120,6 +120,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_LMH_LIMIT_DCVSH 0x10 #define QCOM_SCM_SVC_SMMU_PROGRAM 0x15 +#define QCOM_SCM_SMMU_PT_FORMAT 0x01 #define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03 #define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02 diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 8a065f8660c1..ca4a88d7cbdc 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -108,6 +108,7 @@ extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); +extern int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt); extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en); extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, -- cgit v1.2.3-71-gd317 From 6d3ac94bae21de6b6a1d81596752428960012816 Mon Sep 17 00:00:00 2001 From: Yang Guang Date: Fri, 14 Jan 2022 08:11:02 +0800 Subject: ssb: fix boolreturn.cocci warning The coccinelle report ./include/linux/ssb/ssb_driver_gige.h:98:8-9: WARNING: return of 0/1 in function 'ssb_gige_must_flush_posted_writes' with return type bool Return statements in functions returning bool should use true/false instead of 1/0. 
Reported-by: Zeal Robot Signed-off-by: Yang Guang Signed-off-by: David Yang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/fa4f1fa737e715eb62a85229ac5f12bae21145cf.1642065490.git.davidcomponentone@gmail.com --- include/linux/ssb/ssb_driver_gige.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h index 15ba0df1ee0d..28c145a51e57 100644 --- a/include/linux/ssb/ssb_driver_gige.h +++ b/include/linux/ssb/ssb_driver_gige.h @@ -95,7 +95,7 @@ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) struct ssb_gige *dev = pdev_to_ssb_gige(pdev); if (dev) return (dev->dev->bus->chip_id == 0x4785); - return 0; + return false; } /* Get the device MAC address */ -- cgit v1.2.3-71-gd317 From ef9989afda73332df566852d6e9ca695c05f10ce Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 1 Feb 2022 13:29:22 +0000 Subject: kvm: add guest_state_{enter,exit}_irqoff() When transitioning to/from guest mode, it is necessary to inform lockdep, tracing, and RCU in a specific order, similar to the requirements for transitions to/from user mode. Additionally, it is necessary to perform vtime accounting for a window around running the guest, with RCU enabled, such that timer interrupts taken from the guest can be accounted as guest time. Most architectures don't handle all the necessary pieces, and have a number of common bugs, including unsafe usage of RCU during the window between guest_enter() and guest_exit(). On x86, this was dealt with across commits: 87fa7f3e98a1310e ("x86/kvm: Move context tracking where it belongs") 0642391e2139a2c1 ("x86/kvm/vmx: Add hardirq tracing to guest enter/exit") 9fc975e9efd03e57 ("x86/kvm/svm: Add hardirq tracing on guest enter/exit") 3ebccdf373c21d86 ("x86/kvm/vmx: Move guest enter/exit into .noinstr.text") 135961e0a7d555fc ("x86/kvm/svm: Move guest enter/exit into .noinstr.text") 160457140187c5fb ("KVM: x86: Defer vtime accounting 'til after IRQ handling") bc908e091b326467 ("KVM: x86: Consolidate guest enter/exit logic to common helpers") ... but those fixes are specific to x86, and as the resulting logic (while correct) is split across generic helper functions and x86-specific helper functions, it is difficult to see that the entry/exit accounting is balanced. This patch adds generic helpers which architectures can use to handle guest entry/exit consistently and correctly. The guest_{enter,exit}() helpers are split into guest_timing_{enter,exit}() to perform vtime accounting, and guest_context_{enter,exit}() to perform the necessary context tracking and RCU management. The existing guest_{enter,exit}() helpers are left as wrappers of these. Atop this, new guest_state_enter_irqoff() and guest_state_exit_irqoff() helpers are added to handle the ordering of lockdep, tracing, and RCU management. These are intended to mirror exit_to_user_mode() and enter_from_user_mode(). Subsequent patches will migrate architectures over to the new helpers, following a sequence: guest_timing_enter_irqoff(); guest_state_enter_irqoff(); < run the vcpu > guest_state_exit_irqoff(); < take any pending IRQs > guest_timing_exit_irqoff(); This sequence handles all of the above correctly, and more clearly balances the entry and exit portions, making it easier to understand. The existing helpers are marked as deprecated, and will be removed once all architectures have been converted. There should be no functional change as a result of this patch.
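A hedged sketch of the conversion an architecture would make; arch_enter_guest() is a hypothetical stand-in for the arch's actual world switch:

    static int example_vcpu_run(struct kvm_vcpu *vcpu)
    {
    	int exit_reason;

    	local_irq_disable();
    	guest_timing_enter_irqoff();

    	guest_state_enter_irqoff();
    	exit_reason = arch_enter_guest(vcpu);	/* hypothetical world switch */
    	guest_state_exit_irqoff();

    	/* take any pending IRQs so they are accounted as guest time */
    	local_irq_enable();
    	local_irq_disable();

    	guest_timing_exit_irqoff();
    	local_irq_enable();

    	return exit_reason;
    }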
Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Reviewed-by: Paolo Bonzini Reviewed-by: Nicolas Saenz Julienne Message-Id: <20220201132926.3301912-2-mark.rutland@arm.com> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 112 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 109 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f079820f52b5..b3810976a27f 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -29,7 +29,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -368,8 +370,11 @@ struct kvm_vcpu { u64 last_used_slot_gen; }; -/* must be called with irqs disabled */ -static __always_inline void guest_enter_irqoff(void) +/* + * Start accounting time towards a guest. + * Must be called before entering guest context. + */ +static __always_inline void guest_timing_enter_irqoff(void) { /* * This is running in ioctl context so its safe to assume that it's the @@ -378,7 +383,18 @@ static __always_inline void guest_enter_irqoff(void) instrumentation_begin(); vtime_account_guest_enter(); instrumentation_end(); +} +/* + * Enter guest context and enter an RCU extended quiescent state. + * + * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is + * unsafe to use any code which may directly or indirectly use RCU, tracing + * (including IRQ flag tracing), or lockdep. All code in this period must be + * non-instrumentable. + */ +static __always_inline void guest_context_enter_irqoff(void) +{ /* * KVM does not hold any references to rcu protected data when it * switches CPU into a guest mode. In fact switching to a guest mode @@ -394,16 +410,79 @@ static __always_inline void guest_enter_irqoff(void) } } -static __always_inline void guest_exit_irqoff(void) +/* + * Deprecated. Architectures should move to guest_timing_enter_irqoff() and + * guest_state_enter_irqoff(). + */ +static __always_inline void guest_enter_irqoff(void) +{ + guest_timing_enter_irqoff(); + guest_context_enter_irqoff(); +} + +/** + * guest_state_enter_irqoff - Fixup state when entering a guest + * + * Entry to a guest will enable interrupts, but the kernel state is interrupts + * disabled when this is invoked. Also tell RCU about it. + * + * 1) Trace interrupts on state + * 2) Invoke context tracking if enabled to adjust RCU state + * 3) Tell lockdep that interrupts are enabled + * + * Invoked from architecture specific code before entering a guest. + * Must be called with interrupts disabled and the caller must be + * non-instrumentable. + * The caller has to invoke guest_timing_enter_irqoff() before this. + * + * Note: this is analogous to exit_to_user_mode(). + */ +static __always_inline void guest_state_enter_irqoff(void) +{ + instrumentation_begin(); + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(CALLER_ADDR0); + instrumentation_end(); + + guest_context_enter_irqoff(); + lockdep_hardirqs_on(CALLER_ADDR0); +} + +/* + * Exit guest context and exit an RCU extended quiescent state. + * + * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is + * unsafe to use any code which may directly or indirectly use RCU, tracing + * (including IRQ flag tracing), or lockdep. All code in this period must be + * non-instrumentable. + */ +static __always_inline void guest_context_exit_irqoff(void) { context_tracking_guest_exit(); +} +/* + * Stop accounting time towards a guest. + * Must be called after exiting guest context. 
+ */ +static __always_inline void guest_timing_exit_irqoff(void) +{ instrumentation_begin(); /* Flush the guest cputime we spent on the guest */ vtime_account_guest_exit(); instrumentation_end(); } +/* + * Deprecated. Architectures should move to guest_state_exit_irqoff() and + * guest_timing_exit_irqoff(). + */ +static __always_inline void guest_exit_irqoff(void) +{ + guest_context_exit_irqoff(); + guest_timing_exit_irqoff(); +} + static inline void guest_exit(void) { unsigned long flags; @@ -413,6 +492,33 @@ static inline void guest_exit(void) local_irq_restore(flags); } +/** + * guest_state_exit_irqoff - Establish state when returning from guest mode + * + * Entry from a guest disables interrupts, but guest mode is traced as + * interrupts enabled. Also with NO_HZ_FULL RCU might be idle. + * + * 1) Tell lockdep that interrupts are disabled + * 2) Invoke context tracking if enabled to reactivate RCU + * 3) Trace interrupts off state + * + * Invoked from architecture specific code after exiting a guest. + * Must be invoked with interrupts disabled and the caller must be + * non-instrumentable. + * The caller has to invoke guest_timing_exit_irqoff() after this. + * + * Note: this is analogous to enter_from_user_mode(). + */ +static __always_inline void guest_state_exit_irqoff(void) +{ + lockdep_hardirqs_off(CALLER_ADDR0); + guest_context_exit_irqoff(); + + instrumentation_begin(); + trace_hardirqs_off_finish(); + instrumentation_end(); +} + static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) { /* -- cgit v1.2.3-71-gd317 From 2220af8ca61ae67de4ec3deec1c6395a2f65b9fd Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 1 Feb 2022 14:06:47 +0100 Subject: power: supply: core: Refactor power_supply_set_input_current_limit_from_supplier() Some (USB) charger ICs have variants with USB D+ and D- pins to do their own builtin charger-type detection, like e.g. the bq24190 and bq25890 and also variants which lack this functionality, e.g. the bq24192 and bq25892. In case the charger-type; and thus the input-current-limit detection is done outside the charger IC then we need some way to communicate this to the charger IC. In the past extcon was used for this, but if the external detection does e.g. full USB PD negotiation then the extcon cable-types do not convey enough information. For these setups it was decided to model the external charging "brick" and the parameters negotiated with it as a power_supply class-device itself; and power_supply_set_input_current_limit_from_supplier() was introduced to allow drivers to get the input-current-limit this way. But in some cases psy drivers may want to know other properties, e.g. the bq25892 can do "quick-charge" negotiation by pulsing its current draw, but this should only be done if the usb_type psy-property of its supplier is set to DCP (and device-properties indicate the board allows higher voltages). Instead of adding extra helper functions for each property which a psy-driver wants to query from its supplier, refactor power_supply_set_input_current_limit_from_supplier() into a more generic power_supply_get_property_from_supplier() function. 
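A hedged sketch of the kind of supplier query motivated above; whether a given supplier reports POWER_SUPPLY_PROP_USB_TYPE is board-specific, this only shows the new API shape:

    union power_supply_propval val;

    if (!power_supply_get_property_from_supplier(psy, POWER_SUPPLY_PROP_USB_TYPE, &val) &&
        val.intval == POWER_SUPPLY_USB_TYPE_DCP) {
    	/* supplier reports a DCP charger: quick-charge pulsing may be tried */
    }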
Reviewed-by: Andy Shevchenko Signed-off-by: Hans de Goede Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq24190_charger.c | 12 ++++++- drivers/power/supply/power_supply_core.c | 57 +++++++++++++++++--------------- include/linux/power_supply.h | 5 +-- 3 files changed, 44 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index 06c34b09349c..a1c957a26f07 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -1206,8 +1206,18 @@ static void bq24190_input_current_limit_work(struct work_struct *work) struct bq24190_dev_info *bdi = container_of(work, struct bq24190_dev_info, input_current_limit_work.work); + union power_supply_propval val; + int ret; + + ret = power_supply_get_property_from_supplier(bdi->charger, + POWER_SUPPLY_PROP_CURRENT_MAX, + &val); + if (ret) + return; - power_supply_set_input_current_limit_from_supplier(bdi->charger); + bq24190_charger_set_property(bdi->charger, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, + &val); } /* Sync the input-current-limit with our parent supply (if we have one) */ diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index ec838c9bcc0a..df4471e50d33 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -376,46 +376,49 @@ int power_supply_is_system_supplied(void) } EXPORT_SYMBOL_GPL(power_supply_is_system_supplied); -static int __power_supply_get_supplier_max_current(struct device *dev, - void *data) +struct psy_get_supplier_prop_data { + struct power_supply *psy; + enum power_supply_property psp; + union power_supply_propval *val; +}; + +static int __power_supply_get_supplier_property(struct device *dev, void *_data) { - union power_supply_propval ret = {0,}; struct power_supply *epsy = dev_get_drvdata(dev); - struct power_supply *psy = data; + struct psy_get_supplier_prop_data *data = _data; - if (__power_supply_is_supplied_by(epsy, psy)) - if (!epsy->desc->get_property(epsy, - POWER_SUPPLY_PROP_CURRENT_MAX, - &ret)) - return ret.intval; + if (__power_supply_is_supplied_by(epsy, data->psy)) + if (!epsy->desc->get_property(epsy, data->psp, data->val)) + return 1; /* Success */ - return 0; + return 0; /* Continue iterating */ } -int power_supply_set_input_current_limit_from_supplier(struct power_supply *psy) +int power_supply_get_property_from_supplier(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) { - union power_supply_propval val = {0,}; - int curr; - - if (!psy->desc->set_property) - return -EINVAL; + struct psy_get_supplier_prop_data data = { + .psy = psy, + .psp = psp, + .val = val, + }; + int ret; /* * This function is not intended for use with a supply with multiple - * suppliers, we simply pick the first supply to report a non 0 - * max-current. + * suppliers, we simply pick the first supply to report the psp. */ - curr = class_for_each_device(power_supply_class, NULL, psy, - __power_supply_get_supplier_max_current); - if (curr <= 0) - return (curr == 0) ? 
-ENODEV : curr; - - val.intval = curr; + ret = class_for_each_device(power_supply_class, NULL, &data, + __power_supply_get_supplier_property); + if (ret < 0) + return ret; + if (ret == 0) + return -ENODEV; - return psy->desc->set_property(psy, - POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val); + return 0; } -EXPORT_SYMBOL_GPL(power_supply_set_input_current_limit_from_supplier); +EXPORT_SYMBOL_GPL(power_supply_get_property_from_supplier); int power_supply_set_battery_charged(struct power_supply *psy) { diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index e218041cc000..006111917d1a 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -597,8 +597,9 @@ power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table int table_len, int temp); extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); -extern int power_supply_set_input_current_limit_from_supplier( - struct power_supply *psy); +int power_supply_get_property_from_supplier(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); extern int power_supply_set_battery_charged(struct power_supply *psy); #ifdef CONFIG_POWER_SUPPLY -- cgit v1.2.3-71-gd317 From 79d35365a5858466ff7b37aaf1fcf11b683b9442 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 1 Feb 2022 14:06:56 +0100 Subject: power: supply: bq25890: Add support for registering the Vbus boost converter as a regulator The bq25890_charger code supports enabling/disabling the boost converter based on usb-phy notifications. But the usb-phy framework is not used on all boards/platforms. Add support for registering the Vbus boost converter as a standard regulator when there is no usb-phy on the board. Also add support for providing regulator_init_data through platform_data for use on boards where device-tree is not used and the platform code must thus provide the regulator_init_data.
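A hedged board-file sketch against the new header; the constraint set is an assumption and a real board would tailor it:

    static const struct regulator_init_data example_vbus_init_data = {
    	.constraints = {
    		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
    	},
    };

    static const struct bq25890_platform_data example_bq25890_pdata = {
    	.regulator_init_data = &example_vbus_init_data,
    };

The struct would be passed via i2c_board_info.platform_data so that dev_get_platdata() in probe() picks it up.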
Reviewed-by: Andy Shevchenko Signed-off-by: Hans de Goede Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq25890_charger.c | 80 ++++++++++++++++++++++++++++++++++ include/linux/power/bq25890_charger.h | 15 +++++++ 2 files changed, 95 insertions(+) create mode 100644 include/linux/power/bq25890_charger.h (limited to 'include/linux') diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 162bffb02410..637cdd3b6b11 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -8,7 +8,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -876,6 +878,45 @@ static int bq25890_usb_notifier(struct notifier_block *nb, unsigned long val, return NOTIFY_OK; } +#ifdef CONFIG_REGULATOR +static int bq25890_vbus_enable(struct regulator_dev *rdev) +{ + struct bq25890_device *bq = rdev_get_drvdata(rdev); + + return bq25890_set_otg_cfg(bq, 1); +} + +static int bq25890_vbus_disable(struct regulator_dev *rdev) +{ + struct bq25890_device *bq = rdev_get_drvdata(rdev); + + return bq25890_set_otg_cfg(bq, 0); +} + +static int bq25890_vbus_is_enabled(struct regulator_dev *rdev) +{ + struct bq25890_device *bq = rdev_get_drvdata(rdev); + + return bq25890_field_read(bq, F_OTG_CFG); +} + +static const struct regulator_ops bq25890_vbus_ops = { + .enable = bq25890_vbus_enable, + .disable = bq25890_vbus_disable, + .is_enabled = bq25890_vbus_is_enabled, +}; + +static const struct regulator_desc bq25890_vbus_desc = { + .name = "usb_otg_vbus", + .of_match = "usb-otg-vbus", + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .ops = &bq25890_vbus_ops, + .fixed_uV = 5000000, + .n_voltages = 1, +}; +#endif + static int bq25890_get_chip_version(struct bq25890_device *bq) { int id, rev; @@ -1075,6 +1116,22 @@ static int bq25890_probe(struct i2c_client *client, bq->usb_nb.notifier_call = bq25890_usb_notifier; usb_register_notifier(bq->usb_phy, &bq->usb_nb); } +#ifdef CONFIG_REGULATOR + else { + struct bq25890_platform_data *pdata = dev_get_platdata(dev); + struct regulator_config cfg = { }; + struct regulator_dev *reg; + + cfg.dev = dev; + cfg.driver_data = bq; + if (pdata) + cfg.init_data = pdata->regulator_init_data; + + reg = devm_regulator_register(dev, &bq25890_vbus_desc, &cfg); + if (IS_ERR(reg)) + return dev_err_probe(dev, PTR_ERR(reg), "registering regulator"); + } +#endif ret = bq25890_power_supply_init(bq); if (ret < 0) { @@ -1113,6 +1170,28 @@ static int bq25890_remove(struct i2c_client *client) return 0; } +static void bq25890_shutdown(struct i2c_client *client) +{ + struct bq25890_device *bq = i2c_get_clientdata(client); + + /* + * TODO this if + return should probably be removed, but that would + * introduce a function change for boards using the usb-phy framework. + * This needs to be tested on such a board before making this change. + */ + if (!IS_ERR_OR_NULL(bq->usb_phy)) + return; + + /* + * Turn off the 5v Boost regulator which outputs Vbus to the device's + * Micro-USB or Type-C USB port. Leaving this on drains power and + * this avoids the PMIC on some device-models seeing this as Vbus + * getting inserted after shutdown, causing the device to immediately + * power-up again. 
+ */ + bq25890_set_otg_cfg(bq, 0); +} + #ifdef CONFIG_PM_SLEEP static int bq25890_suspend(struct device *dev) { @@ -1192,6 +1271,7 @@ static struct i2c_driver bq25890_driver = { }, .probe = bq25890_probe, .remove = bq25890_remove, + .shutdown = bq25890_shutdown, .id_table = bq25890_i2c_ids, }; module_i2c_driver(bq25890_driver); diff --git a/include/linux/power/bq25890_charger.h b/include/linux/power/bq25890_charger.h new file mode 100644 index 000000000000..c706ddb77a08 --- /dev/null +++ b/include/linux/power/bq25890_charger.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Platform data for the TI bq25890 battery charger driver. + */ + +#ifndef _BQ25890_CHARGER_H_ +#define _BQ25890_CHARGER_H_ + +struct regulator_init_data; + +struct bq25890_platform_data { + const struct regulator_init_data *regulator_init_data; +}; + +#endif -- cgit v1.2.3-71-gd317 From 3afcbe09470091ca8a8048ef7c96701839a70961 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 1 Feb 2022 14:07:00 +0100 Subject: mfd: intel_soc_pmic_chtwc: Add cht_wc_model data to struct intel_soc_pmic Tablet / laptop designs using an Intel Cherry Trail x86 main SoC with an Intel Whiskey Cove PMIC do not use a single standard setup for the charger, fuel-gauge and other chips surrounding the PMIC / charging+data USB port. Unlike what is normal on x86, this diversity in designs is not handled by the ACPI tables. On 2 of the 3 known designs there are no standard (PNP0C0A) ACPI battery devices and on the third design the ACPI battery device does not work under Linux due to it requiring non-standard and undocumented ACPI behavior. So to make things work under Linux we use native charger and fuel-gauge drivers on these devices, re-using the native drivers used on ARM boards with the same charger / fuel-gauge ICs. This requires various MFD-cell drivers for the CHT-WC PMIC cells to know exactly which model they are running on so that they can e.g. instantiate an I2C-client for the right model charger-IC (the charger is connected to an I2C-controller which is part of the PMIC). Rather than duplicating DMI-id matching to check which model we are running on in each MFD-cell driver, add a check for this to the shared drivers/mfd/intel_soc_pmic_chtwc.c code by using a DMI table for all 3 known models: 1. The GPD Win and GPD Pocket mini-laptops, these are really 2 models, but the Pocket re-uses the GPD Win's design in a different housing: The WC PMIC is connected to a TI BQ24292i charger, paired with a Maxim MAX17047 fuelgauge + a FUSB302 USB Type-C Controller + a PI3USB30532 USB switch, for a fully functional Type-C port. 2. The Xiaomi Mi Pad 2: The WC PMIC is connected to a TI BQ25890 charger, paired with a TI BQ27520 fuelgauge, using the TI BQ25890 for BC1.2 charger type detection, for a USB-2 only Type-C port without PD. 3. The Lenovo Yoga Book YB1-X90 / Lenovo Yoga Book YB1-X91 series: The WC PMIC is connected to a TI BQ25892 charger, paired with a TI BQ27542 fuelgauge, using the WC PMIC for BC1.2 charger type detection and using the BQ25892's Mediatek Pump Express+ (1.0) support to enable charging with up to 12V through a micro-USB port.
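A hedged sketch of how an MFD-cell driver might consume the new field; the dispatch and the instantiated client ICs are illustrative, not part of this patch:

    struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);

    switch (pmic->cht_wc_model) {
    case INTEL_CHT_WC_GPD_WIN_POCKET:
    	/* bq24292i charger + max17047 fuel-gauge + fusb302 Type-C */
    	break;
    case INTEL_CHT_WC_XIAOMI_MIPAD2:
    	/* bq25890 charger + bq27520 fuel-gauge */
    	break;
    case INTEL_CHT_WC_LENOVO_YOGABOOK1:
    	/* bq25892 charger + bq27542 fuel-gauge */
    	break;
    default:
    	dev_warn(dev, "unknown CHT-WC model, not instantiating clients\n");
    	break;
    }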
Reviewed-by: Andy Shevchenko Acked-by: Lee Jones Signed-off-by: Hans de Goede Signed-off-by: Sebastian Reichel --- drivers/mfd/intel_soc_pmic_chtwc.c | 40 ++++++++++++++++++++++++++++++++++++++ include/linux/mfd/intel_soc_pmic.h | 8 ++++++++ 2 files changed, 48 insertions(+) (limited to 'include/linux') diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c index 49c5f71664bc..4eab191e053a 100644 --- a/drivers/mfd/intel_soc_pmic_chtwc.c +++ b/drivers/mfd/intel_soc_pmic_chtwc.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -134,9 +135,44 @@ static const struct regmap_irq_chip cht_wc_regmap_irq_chip = { .num_regs = 1, }; +static const struct dmi_system_id cht_wc_model_dmi_ids[] = { + { + /* GPD win / GPD pocket mini laptops */ + .driver_data = (void *)(long)INTEL_CHT_WC_GPD_WIN_POCKET, + /* + * This DMI match may not seem unique, but it is. In the 67000+ + * DMI decode dumps from linux-hardware.org only 116 have + * board_vendor set to "AMI Corporation" and of those 116 only + * the GPD win's and pocket's board_name is "Default string". + */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }, + }, { + /* Xiaomi Mi Pad 2 */ + .driver_data = (void *)(long)INTEL_CHT_WC_XIAOMI_MIPAD2, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"), + }, + }, { + /* Lenovo Yoga Book X90F / X91F / X91L */ + .driver_data = (void *)(long)INTEL_CHT_WC_LENOVO_YOGABOOK1, + .matches = { + /* Non exact match to match all versions */ + DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), + }, + }, + { } +}; + static int cht_wc_probe(struct i2c_client *client) { struct device *dev = &client->dev; + const struct dmi_system_id *id; struct intel_soc_pmic *pmic; acpi_status status; unsigned long long hrv; @@ -160,6 +196,10 @@ static int cht_wc_probe(struct i2c_client *client) if (!pmic) return -ENOMEM; + id = dmi_first_match(cht_wc_model_dmi_ids); + if (id) + pmic->cht_wc_model = (long)id->driver_data; + pmic->irq = client->irq; pmic->dev = dev; i2c_set_clientdata(client, pmic); diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index 6a88e34cb955..945bde1fe55c 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -13,6 +13,13 @@ #include +enum intel_cht_wc_models { + INTEL_CHT_WC_UNKNOWN, + INTEL_CHT_WC_GPD_WIN_POCKET, + INTEL_CHT_WC_XIAOMI_MIPAD2, + INTEL_CHT_WC_LENOVO_YOGABOOK1, +}; + /** * struct intel_soc_pmic - Intel SoC PMIC data * @irq: Master interrupt number of the parent PMIC device @@ -39,6 +46,7 @@ struct intel_soc_pmic { struct regmap_irq_chip_data *irq_chip_data_crit; struct device *dev; struct intel_scu_ipc_dev *scu; + enum intel_cht_wc_models cht_wc_model; }; int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address, -- cgit v1.2.3-71-gd317 From ab28e944197fa78e6af7c4a0ffd6bba9a5bbacf0 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Mon, 31 Jan 2022 15:01:11 -0800 Subject: topology/sysfs: Add PPIN in sysfs under cpu topology PPIN is the Protected Processor Identification Number. This is used to identify the socket as a Field Replaceable Unit (FRU). Existing code only displays this when reporting errors. But this makes it inconvenient for large clusters to use it for its intended purpose of inventory control. 
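To illustrate the inventory-control use case, here is a hedged userspace sketch that reads the new file. It assumes only what this patch adds (the sysfs path and the 0x%llx output format) and must run as root, since the file is readable only by admin:

#include <stdio.h>

int main(void)
{
	unsigned long long ppin;
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/ppin", "r");

	if (!f)
		return 1;	/* no permission, or no PPIN support */
	if (fscanf(f, "%llx", &ppin) == 1)
		printf("socket PPIN: 0x%llx\n", ppin);
	fclose(f);
	return 0;
}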
Add ppin to /sys/devices/system/cpu/cpu*/topology to make what is already available using RDMSR more easily accessible. Make the file read only for root in case there are still people concerned about making a unique system "serial number" available. Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Acked-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20220131230111.2004669-6-tony.luck@intel.com --- Documentation/ABI/stable/sysfs-devices-system-cpu | 4 ++++ Documentation/ABI/testing/sysfs-devices-system-cpu | 6 ++++++ arch/x86/include/asm/topology.h | 1 + drivers/base/topology.c | 4 ++++ include/linux/topology.h | 3 +++ 5 files changed, 18 insertions(+) (limited to 'include/linux') diff --git a/Documentation/ABI/stable/sysfs-devices-system-cpu b/Documentation/ABI/stable/sysfs-devices-system-cpu index 3965ce504484..902392d7eddf 100644 --- a/Documentation/ABI/stable/sysfs-devices-system-cpu +++ b/Documentation/ABI/stable/sysfs-devices-system-cpu @@ -86,6 +86,10 @@ What: /sys/devices/system/cpu/cpuX/topology/die_cpus Description: internal kernel map of CPUs within the same die. Values: hexadecimal bitmask. +What: /sys/devices/system/cpu/cpuX/topology/ppin +Description: per-socket protected processor inventory number +Values: hexadecimal. + What: /sys/devices/system/cpu/cpuX/topology/die_cpus_list Description: human-readable list of CPUs within the same die. The format is like 0-3, 8-11, 14,17. diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 61f5676a7429..74962c200790 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -73,6 +73,7 @@ What: /sys/devices/system/cpu/cpuX/topology/core_id /sys/devices/system/cpu/cpuX/topology/physical_package_id /sys/devices/system/cpu/cpuX/topology/thread_siblings /sys/devices/system/cpu/cpuX/topology/thread_siblings_list + /sys/devices/system/cpu/cpuX/topology/ppin Date: December 2008 Contact: Linux kernel mailing list Description: CPU topology files that describe a logical CPU's relationship @@ -103,6 +104,11 @@ Description: CPU topology files that describe a logical CPU's relationship thread_siblings_list: human-readable list of cpuX's hardware threads within the same core as cpuX + ppin: human-readable Protected Processor Identification + Number of the socket the cpu# belongs to. There should be + one per physical_package_id. File is readable only to + admin. + See Documentation/admin-guide/cputopology.rst for more information. 
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 2f0b6be8eaab..43a89476a522 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -110,6 +110,7 @@ extern const struct cpumask *cpu_clustergroup_mask(int cpu); #define topology_logical_die_id(cpu) (cpu_data(cpu).logical_die_id) #define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id) #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) +#define topology_ppin(cpu) (cpu_data(cpu).ppin) extern unsigned int __max_die_per_package; diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 044f3664f8f2..e9d1efcda89b 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -58,6 +58,9 @@ static DEVICE_ATTR_RO(cluster_id); define_id_show_func(core_id, "%d"); static DEVICE_ATTR_RO(core_id); +define_id_show_func(ppin, "0x%llx"); +static DEVICE_ATTR_ADMIN_RO(ppin); + define_siblings_read_func(thread_siblings, sibling_cpumask); static BIN_ATTR_RO(thread_siblings, 0); static BIN_ATTR_RO(thread_siblings_list, 0); @@ -145,6 +148,7 @@ static struct attribute *default_attrs[] = { #ifdef TOPOLOGY_DRAWER_SYSFS &dev_attr_drawer_id.attr, #endif + &dev_attr_ppin.attr, NULL }; diff --git a/include/linux/topology.h b/include/linux/topology.h index a6e201758ae9..f19bc3626297 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -211,6 +211,9 @@ static inline int cpu_to_mem(int cpu) #ifndef topology_drawer_id #define topology_drawer_id(cpu) ((void)(cpu), -1) #endif +#ifndef topology_ppin +#define topology_ppin(cpu) ((void)(cpu), 0ull) +#endif #ifndef topology_sibling_cpumask #define topology_sibling_cpumask(cpu) cpumask_of(cpu) #endif -- cgit v1.2.3-71-gd317 From bee9f65523218e3baeeecde9295c8fbe9bc08e0a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 27 Jan 2022 16:02:50 +0000 Subject: netfs, cachefiles: Add a method to query presence of data in the cache Add a netfs_cache_ops method by which a network filesystem can ask the cache about what data it has available and where so that it can make a multipage read more efficient. Signed-off-by: David Howells cc: linux-cachefs@redhat.com Acked-by: Jeff Layton Reviewed-by: Rohith Surabattula Signed-off-by: Steve French --- Documentation/filesystems/netfs_library.rst | 16 ++++++++ fs/cachefiles/io.c | 59 +++++++++++++++++++++++++++++ include/linux/netfs.h | 7 ++++ 3 files changed, 82 insertions(+) (limited to 'include/linux') diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index 136f8da3d0e2..4f373a8ec47b 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -462,6 +462,10 @@ operation table looks like the following:: struct iov_iter *iter, netfs_io_terminated_t term_func, void *term_func_priv); + + int (*query_occupancy)(struct netfs_cache_resources *cres, + loff_t start, size_t len, size_t granularity, + loff_t *_data_start, size_t *_data_len); }; With a termination handler function pointer:: @@ -536,6 +540,18 @@ The methods defined in the table are: indicating whether the termination is definitely happening in the caller's context. + * ``query_occupancy()`` + + [Required] Called to find out where the next piece of data is within a + particular region of the cache. The start and length of the region to be + queried are passed in, along with the granularity to which the answer needs + to be aligned. The function passes back the start and length of the data, + if any, available within that region. 
Note that there may be a hole at the + front. + + It returns 0 if some data was found, -ENODATA if there was no usable data + within the region or -ENOBUFS if there is no caching on this file. + Note that these methods are passed a pointer to the cache resource structure, not the read request structure as they could be used in other situations where there isn't a read request structure as well, such as writing dirty data to the diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index 04eb52736990..753986ea1583 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -191,6 +191,64 @@ presubmission_error: return ret; } +/* + * Query the occupancy of the cache in a region, returning where the next chunk + * of data starts and how long it is. + */ +static int cachefiles_query_occupancy(struct netfs_cache_resources *cres, + loff_t start, size_t len, size_t granularity, + loff_t *_data_start, size_t *_data_len) +{ + struct cachefiles_object *object; + struct file *file; + loff_t off, off2; + + *_data_start = -1; + *_data_len = 0; + + if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ)) + return -ENOBUFS; + + object = cachefiles_cres_object(cres); + file = cachefiles_cres_file(cres); + granularity = max_t(size_t, object->volume->cache->bsize, granularity); + + _enter("%pD,%li,%llx,%zx/%llx", + file, file_inode(file)->i_ino, start, len, + i_size_read(file_inode(file))); + + off = cachefiles_inject_read_error(); + if (off == 0) + off = vfs_llseek(file, start, SEEK_DATA); + if (off == -ENXIO) + return -ENODATA; /* Beyond EOF */ + if (off < 0 && off >= (loff_t)-MAX_ERRNO) + return -ENOBUFS; /* Error. */ + if (round_up(off, granularity) >= start + len) + return -ENODATA; /* No data in range */ + + off2 = cachefiles_inject_read_error(); + if (off2 == 0) + off2 = vfs_llseek(file, off, SEEK_HOLE); + if (off2 == -ENXIO) + return -ENODATA; /* Beyond EOF */ + if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO) + return -ENOBUFS; /* Error. */ + + /* Round away partial blocks */ + off = round_up(off, granularity); + off2 = round_down(off2, granularity); + if (off2 <= off) + return -ENODATA; + + *_data_start = off; + if (off2 > start + len) + *_data_len = len; + else + *_data_len = off2 - off; + return 0; +} + /* * Handle completion of a write to the cache. */ @@ -545,6 +603,7 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = { .write = cachefiles_write, .prepare_read = cachefiles_prepare_read, .prepare_write = cachefiles_prepare_write, + .query_occupancy = cachefiles_query_occupancy, }; /* diff --git a/include/linux/netfs.h b/include/linux/netfs.h index b46c39d98bbd..614f22213e21 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -244,6 +244,13 @@ struct netfs_cache_ops { int (*prepare_write)(struct netfs_cache_resources *cres, loff_t *_start, size_t *_len, loff_t i_size, bool no_space_allocated_yet); + + /* Query the occupancy of the cache in a region, returning where the + * next chunk of data starts and how long it is. + */ + int (*query_occupancy)(struct netfs_cache_resources *cres, + loff_t start, size_t len, size_t granularity, + loff_t *_data_start, size_t *_data_len); }; struct readahead_control; -- cgit v1.2.3-71-gd317 From 90b2433edb6d995bd23d6adde753095b4ab26104 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Mon, 31 Jan 2022 15:22:42 -0300 Subject: seq_file: fix NULL pointer arithmetic warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement conditional logic in order to replace NULL pointer arithmetic. 
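The shape of the fix is easiest to see side by side; both fragments below are taken from the diff that follows, the first being the pattern removed and the second the new shared helper:

/* before: computes a non-NULL token via arithmetic on NULL */
static void *single_start(struct seq_file *p, loff_t *pos)
{
	return NULL + (*pos == 0);
}

/* after: same semantics, expressed as an explicit conditional */
void *single_start(struct seq_file *p, loff_t *pos)
{
	return *pos ? NULL : SEQ_START_TOKEN;
}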
The use of NULL pointer arithmetic was pointed out by clang with the following warning: fs/kernfs/file.c:128:15: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic] return NULL + !*ppos; ~~~~ ^ fs/seq_file.c:559:14: warning: performing pointer arithmetic on a null pointer has undefined behavior [-Wnull-pointer-arithmetic] return NULL + (*pos == 0); Signed-off-by: Maíra Canal Signed-off-by: Al Viro --- fs/kernfs/file.c | 7 +------ fs/seq_file.c | 4 ++-- include/linux/seq_file.h | 1 + 3 files changed, 4 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 9414a7a60a9f..7aefaca876a0 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -120,13 +120,8 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos) if (next == ERR_PTR(-ENODEV)) kernfs_seq_stop_active(sf, next); return next; - } else { - /* - * The same behavior and code as single_open(). Returns - * !NULL if pos is at the beginning; otherwise, NULL. - */ - return NULL + !*ppos; } + return single_start(sf, ppos); } static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos) diff --git a/fs/seq_file.c b/fs/seq_file.c index f8e1f4ee87ff..7ab8a58c29b6 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -554,9 +554,9 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc) } EXPORT_SYMBOL(seq_dentry); -static void *single_start(struct seq_file *p, loff_t *pos) +void *single_start(struct seq_file *p, loff_t *pos) { - return NULL + (*pos == 0); + return *pos ? NULL : SEQ_START_TOKEN; } static void *single_next(struct seq_file *p, void *v, loff_t *pos) diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 88cc16444b43..60820ab511d2 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -162,6 +162,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *); int seq_path_root(struct seq_file *m, const struct path *path, const struct path *root, const char *esc); +void *single_start(struct seq_file *, loff_t *); int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t); int single_release(struct inode *, struct file *); -- cgit v1.2.3-71-gd317 From 6d5c900eb64107001e91e1f46bddc254dded8a59 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 24 Jan 2022 09:22:41 -0800 Subject: net/mlx5e: Use struct_group() for memcpy() region In preparation for FORTIFY_SOURCE performing compile-time and run-time field bounds checking for memcpy(), memmove(), and memset(), avoid intentionally writing across neighboring fields. Use struct_group() in struct vlan_ethhdr around members h_dest and h_source, so they can be referenced together. This will allow memcpy() and sizeof() to more easily reason about sizes, improve readability, and avoid future warnings about writing beyond the end of h_dest. "pahole" shows no size nor member offset changes to struct vlan_ethhdr. "objdump -d" shows no object code changes. 
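For readers who have not met struct_group() before: the macro (from include/linux/stddef.h) wraps the listed members in an anonymous union, so each member stays addressable by its own name while the whole run is also addressable, and sizeof()-able, as one named group. A simplified sketch of the idea, not the exact kernel macro:

/* simplified illustration of struct_group(); the real macro also
 * handles struct tags and attributes
 */
#define struct_group(NAME, MEMBERS...)		\
	union {					\
		struct { MEMBERS };		\
		struct { MEMBERS } NAME;	\
	}

struct example_hdr {
	struct_group(addrs,
		unsigned char h_dest[6];
		unsigned char h_source[6];
	);
	unsigned short proto;
};

/* the 12-byte copy now targets a field of exactly that size:
 * memcpy(&hdr->addrs, src, sizeof(hdr->addrs));
 */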
Fixes: 34802a42b352 ("net/mlx5e: Do not modify the TX SKB") Signed-off-by: Kees Cook Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +- include/linux/if_vlan.h | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 7fd33b356cc8..ee7ecb88adc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -208,7 +208,7 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) int cpy1_sz = 2 * ETH_ALEN; int cpy2_sz = ihs - cpy1_sz; - memcpy(vhdr, skb->data, cpy1_sz); + memcpy(&vhdr->addrs, skb->data, cpy1_sz); vhdr->h_vlan_proto = skb->vlan_proto; vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 8420fe504927..2be4dd7e90a9 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -46,8 +46,10 @@ struct vlan_hdr { * @h_vlan_encapsulated_proto: packet type ID or len */ struct vlan_ethhdr { - unsigned char h_dest[ETH_ALEN]; - unsigned char h_source[ETH_ALEN]; + struct_group(addrs, + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + ); __be16 h_vlan_proto; __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; -- cgit v1.2.3-71-gd317 From c8eaf6ac76f40f6c59fc7d056e2e08c4a57ea9c7 Mon Sep 17 00:00:00 2001 From: Zhen Ni Date: Fri, 28 Jan 2022 17:50:25 +0800 Subject: sched: move autogroup sysctls into its own file move autogroup sysctls to autogroup.c and use the new register_sysctl_init() to register the sysctl interface. 
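The recipe generalizes to any subsystem that wants to own its sysctls. A minimal hedged sketch with hypothetical names, using the same register_sysctl_init() call and ctl_table layout as the diff below:

static unsigned int sysctl_my_feature_enabled = 1;

static struct ctl_table my_feature_sysctls[] = {
	{
		.procname	= "my_feature_enabled",
		.data		= &sysctl_my_feature_enabled,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}	/* terminating empty entry */
};

static void __init my_feature_sysctl_init(void)
{
	/* shows up as /proc/sys/kernel/my_feature_enabled */
	register_sysctl_init("kernel", my_feature_sysctls);
}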
Signed-off-by: Zhen Ni Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20220128095025.8745-1-nizhen@uniontech.com --- include/linux/sched/sysctl.h | 4 ---- kernel/sched/autogroup.c | 23 +++++++++++++++++++++++ kernel/sched/autogroup.h | 1 + kernel/sysctl.c | 11 ----------- 4 files changed, 24 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index c19dd5a2c05c..3f2b70f8d32c 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -45,10 +45,6 @@ extern unsigned int sysctl_sched_uclamp_util_min_rt_default; extern unsigned int sysctl_sched_cfs_bandwidth_slice; #endif -#ifdef CONFIG_SCHED_AUTOGROUP -extern unsigned int sysctl_sched_autogroup_enabled; -#endif - extern int sysctl_sched_rr_timeslice; extern int sched_rr_timeslice; diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index 8629b37d118e..31dd2593145e 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -9,6 +9,28 @@ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; static struct autogroup autogroup_default; static atomic_t autogroup_seq_nr; +#ifdef CONFIG_SYSCTL +static struct ctl_table sched_autogroup_sysctls[] = { + { + .procname = "sched_autogroup_enabled", + .data = &sysctl_sched_autogroup_enabled, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + {} +}; + +static void __init sched_autogroup_sysctl_init(void) +{ + register_sysctl_init("kernel", sched_autogroup_sysctls); +} +#else +#define sched_autogroup_sysctl_init() do { } while (0) +#endif + void __init autogroup_init(struct task_struct *init_task) { autogroup_default.tg = &root_task_group; @@ -198,6 +220,7 @@ void sched_autogroup_exit(struct signal_struct *sig) static int __init setup_autogroup(char *str) { sysctl_sched_autogroup_enabled = 0; + sched_autogroup_sysctl_init(); return 1; } diff --git a/kernel/sched/autogroup.h b/kernel/sched/autogroup.h index b96419974a1f..90fcbfdd70c3 100644 --- a/kernel/sched/autogroup.h +++ b/kernel/sched/autogroup.h @@ -27,6 +27,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg); static inline struct task_group * autogroup_task_group(struct task_struct *p, struct task_group *tg) { + extern unsigned int sysctl_sched_autogroup_enabled; int enabled = READ_ONCE(sysctl_sched_autogroup_enabled); if (enabled && task_wants_autogroup(p, tg)) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 5ae443b2882e..1cb7ca68cd4e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1750,17 +1750,6 @@ static struct ctl_table kern_table[] = { .proc_handler = sysctl_sched_uclamp_handler, }, #endif -#ifdef CONFIG_SCHED_AUTOGROUP - { - .procname = "sched_autogroup_enabled", - .data = &sysctl_sched_autogroup_enabled, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, - }, -#endif #ifdef CONFIG_CFS_BANDWIDTH { .procname = "sched_cfs_bandwidth_slice_us", -- cgit v1.2.3-71-gd317 From 1148836fd3226c20de841084aba24184d4fbbe77 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 2 Feb 2022 14:55:29 +0100 Subject: Revert "fbdev: Garbage collect fbdev scrolling acceleration, part 1 (from TODO list)" This reverts commit b3ec8cdf457e5e63d396fe1346cc788cf7c1b578. Revert the second (of 2) commits which disabled scrolling acceleration in fbcon/fbdev. 
It introduced a regression for fbdev-supported graphic cards because of the performance penalty of doing screen scrolling in software instead of using the graphic card's existing 2D hardware acceleration.

Console scrolling acceleration was disabled by dropping code which checked at runtime whether the driver advertised the FBINFO_HWACCEL_COPYAREA or FBINFO_HWACCEL_FILLRECT capability flags and, if so, enabled scrollmode SCROLL_MOVE, which uses hardware acceleration to move screen contents. After dropping those checks, scrollmode was hard-wired to SCROLL_REDRAW instead, which forces all graphic cards to redraw every character at its new screen position when scrolling.

This change effectively disabled hardware-based scrolling acceleration for ALL drivers, because no kind of 2D hardware acceleration (bitblt, fillrect) in the drivers is used any longer.

The original commit message mentions that only 3 DRM drivers (nouveau, omapdrm and gma500) used hardware acceleration in the past and that code for checking and using scrolling acceleration is thus obsolete. This statement is NOT TRUE, because besides the DRM drivers there are around 35 other fbdev drivers which depend on fbdev/fbcon and still provide hardware acceleration for fbdev/fbcon.

The original commit message also states that syzbot found lots of bugs in fbcon and thus it's "often the solution to just delete code and remove features". This is true, and the bugs (which actually affected all users of fbcon, including DRM) were fixed, or code was dropped, e.g. the support for software scrollback in vgacon (commit 973c096f6a85).

So to further analyze which bugs were found by syzbot, I've looked through all patches in drivers/video which were tagged with syzbot or syzkaller, back to the year 2005. The vast majority fixed the reported issues on a higher level, e.g. when the screen is to be resized or the font size is to be changed. The few that touched driver code fixed real driver bugs, e.g. by adding a check. But NONE of those patches touched code of either the SCROLL_MOVE or the SCROLL_REDRAW case.

That means there was no real reason why SCROLL_MOVE had to be ripped out and SCROLL_REDRAW used instead. The only reason I can imagine so far is that SCROLL_MOVE wasn't used by DRM, and as such it was assumed that it could go away. That argument completely misses the fact that SCROLL_MOVE is still heavily used by fbdev (non-DRM) drivers.

Some people mention that using memcpy() instead of the hardware acceleration is pretty much the same speed. But that's not true, at least not for older graphic cards and machines, where we see slowdowns by a factor of 10 and more, leaving console responsiveness far worse than before.

That's why the original commit is to be reverted. By reverting we reintroduce hardware-based scrolling acceleration and fix the performance regression for fbdev drivers. There isn't any impact on DRM when reverting those patches.
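To make the cost difference concrete: under SCROLL_MOVE the console scrolls with a single blit performed by the card's 2D engine, while SCROLL_REDRAW re-renders every remaining character cell in software. A simplified sketch of the accelerated path, modeled on the bit_bmove() implementation restored below (not the literal fbcon code):

/* Sketch: scroll the visible text area up by one character row with
 * one hardware copyarea operation instead of redrawing every glyph.
 */
static void scroll_up_one_row(struct vc_data *vc, struct fb_info *info)
{
	struct fb_copyarea area = {
		.sx	= 0,
		.sy	= vc->vc_font.height,	/* source: row 1 */
		.dx	= 0,
		.dy	= 0,			/* destination: row 0 */
		.width	= vc->vc_cols * vc->vc_font.width,
		.height	= (vc->vc_rows - 1) * vc->vc_font.height,
	};

	info->fbops->fb_copyarea(info, &area);
}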
Signed-off-by: Helge Deller Acked-by: Geert Uytterhoeven Acked-by: Sven Schnelle Cc: stable@vger.kernel.org # v5.16+ Signed-off-by: Helge Deller Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220202135531.92183-2-deller@gmx.de --- Documentation/gpu/todo.rst | 13 +- drivers/video/fbdev/core/bitblit.c | 16 + drivers/video/fbdev/core/fbcon.c | 509 ++++++++++++++++++++++++++++++-- drivers/video/fbdev/core/fbcon.h | 59 ++++ drivers/video/fbdev/core/fbcon_ccw.c | 28 +- drivers/video/fbdev/core/fbcon_cw.c | 28 +- drivers/video/fbdev/core/fbcon_rotate.h | 9 + drivers/video/fbdev/core/fbcon_ud.c | 37 ++- drivers/video/fbdev/core/tileblit.c | 16 + drivers/video/fbdev/skeletonfb.c | 12 +- include/linux/fb.h | 2 +- 11 files changed, 678 insertions(+), 51 deletions(-) (limited to 'include/linux') diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index da138dd39883..29506815d24a 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -303,19 +303,16 @@ Level: Advanced Garbage collect fbdev scrolling acceleration -------------------------------------------- -Scroll acceleration has been disabled in fbcon. Now it works as the old -SCROLL_REDRAW mode. A ton of code was removed in fbcon.c and the hook bmove was -removed from fbcon_ops. -Remaining tasks: +Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode = +SCROLL_REDRAW. There's a ton of code this will allow us to remove: -- a bunch of the hooks in fbcon_ops could be removed or simplified by calling +- lots of code in fbcon.c + +- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called directly instead of the function table (with a switch on p->rotate) - fb_copyarea is unused after this, and can be deleted from all drivers -- after that, fb_copyarea can be deleted from fb_ops in include/linux/fb.h as - well as cfb_copyarea - Note that not all acceleration code can be deleted, since clearing and cursor support is still accelerated, which might be good candidates for further deletion projects. 
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index 01fae2c96965..f98e8f298bc1 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -43,6 +43,21 @@ static void update_attr(u8 *dst, u8 *src, int attribute, } } +static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fb_copyarea area; + + area.sx = sx * vc->vc_font.width; + area.sy = sy * vc->vc_font.height; + area.dx = dx * vc->vc_font.width; + area.dy = dy * vc->vc_font.height; + area.height = height * vc->vc_font.height; + area.width = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { @@ -378,6 +393,7 @@ static int bit_update_start(struct fb_info *info) void fbcon_set_bitops(struct fbcon_ops *ops) { + ops->bmove = bit_bmove; ops->clear = bit_clear; ops->putcs = bit_putcs; ops->clear_margins = bit_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 99ecd9a6d844..fc34caddf9cf 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -173,6 +173,8 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos); static void fbcon_clear_margins(struct vc_data *vc, int bottom_only); static void fbcon_cursor(struct vc_data *vc, int mode); +static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, + int height, int width); static int fbcon_switch(struct vc_data *vc); static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch); static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); @@ -180,8 +182,16 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); /* * Internal routines */ +static __inline__ void ywrap_up(struct vc_data *vc, int count); +static __inline__ void ywrap_down(struct vc_data *vc, int count); +static __inline__ void ypan_up(struct vc_data *vc, int count); +static __inline__ void ypan_down(struct vc_data *vc, int count); +static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, + int dy, int dx, int height, int width, u_int y_break); static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, int unit); +static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, + int line, int count, int dy); static void fbcon_modechanged(struct fb_info *info); static void fbcon_set_all_vcs(struct fb_info *info); static void fbcon_start(void); @@ -1125,6 +1135,14 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; + /* + * No more hw acceleration for fbcon. + * + * FIXME: Garbage collect all the now dead code after sufficient time + * has passed. + */ + p->scrollmode = SCROLL_REDRAW; + /* * ++guenther: console.c:vc_allocate() relies on initializing * vc_{cols,rows}, but we must not set those if we are only @@ -1211,13 +1229,14 @@ finished: * This system is now divided into two levels because of complications * caused by hardware scrolling. Top level functions: * - * fbcon_clear(), fbcon_putc(), fbcon_clear_margins() + * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins() * * handles y values in range [0, scr_height-1] that correspond to real * screen positions. y_wrap shift means that first line of bitmap may be * anywhere on this display. 
These functions convert lineoffsets to * bitmap offsets and deal with the wrap-around case by splitting blits. * + * fbcon_bmove_physical_8() -- These functions fast implementations * fbcon_clear_physical_8() -- of original fbcon_XXX fns. * fbcon_putc_physical_8() -- (font width != 8) may be added later * @@ -1390,6 +1409,224 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, } } +static __inline__ void ywrap_up(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll += count; + if (p->yscroll >= p->vrows) /* Deal with wrap */ + p->yscroll -= p->vrows; + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode |= FB_VMODE_YWRAP; + ops->update_start(info); + scrollback_max += count; + if (scrollback_max > scrollback_phys_max) + scrollback_max = scrollback_phys_max; + scrollback_current = 0; +} + +static __inline__ void ywrap_down(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll -= count; + if (p->yscroll < 0) /* Deal with wrap */ + p->yscroll += p->vrows; + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode |= FB_VMODE_YWRAP; + ops->update_start(info); + scrollback_max -= count; + if (scrollback_max < 0) + scrollback_max = 0; + scrollback_current = 0; +} + +static __inline__ void ypan_up(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + struct fbcon_ops *ops = info->fbcon_par; + + p->yscroll += count; + if (p->yscroll > p->vrows - vc->vc_rows) { + ops->bmove(vc, info, p->vrows - vc->vc_rows, + 0, 0, 0, vc->vc_rows, vc->vc_cols); + p->yscroll -= p->vrows - vc->vc_rows; + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max += count; + if (scrollback_max > scrollback_phys_max) + scrollback_max = scrollback_phys_max; + scrollback_current = 0; +} + +static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll += count; + + if (p->yscroll > p->vrows - vc->vc_rows) { + p->yscroll -= p->vrows - vc->vc_rows; + fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t); + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max += count; + if (scrollback_max > scrollback_phys_max) + scrollback_max = scrollback_phys_max; + scrollback_current = 0; +} + +static __inline__ void ypan_down(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + struct fbcon_ops *ops = info->fbcon_par; + + p->yscroll -= count; + if (p->yscroll < 0) { + ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows, + 0, vc->vc_rows, vc->vc_cols); + p->yscroll += p->vrows - vc->vc_rows; + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * 
vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max -= count; + if (scrollback_max < 0) + scrollback_max = 0; + scrollback_current = 0; +} + +static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll -= count; + + if (p->yscroll < 0) { + p->yscroll += p->vrows - vc->vc_rows; + fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count); + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max -= count; + if (scrollback_max < 0) + scrollback_max = 0; + scrollback_current = 0; +} + +static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, + int line, int count, int dy) +{ + unsigned short *s = (unsigned short *) + (vc->vc_origin + vc->vc_size_row * line); + + while (count--) { + unsigned short *start = s; + unsigned short *le = advance_row(s, 1); + unsigned short c; + int x = 0; + unsigned short attr = 1; + + do { + c = scr_readw(s); + if (attr != (c & 0xff00)) { + attr = c & 0xff00; + if (s > start) { + fbcon_putcs(vc, start, s - start, + dy, x); + x += s - start; + start = s; + } + } + console_conditional_schedule(); + s++; + } while (s < le); + if (s > start) + fbcon_putcs(vc, start, s - start, dy, x); + console_conditional_schedule(); + dy++; + } +} + +static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info, + struct fbcon_display *p, int line, int count, int ycount) +{ + int offset = ycount * vc->vc_cols; + unsigned short *d = (unsigned short *) + (vc->vc_origin + vc->vc_size_row * line); + unsigned short *s = d + offset; + struct fbcon_ops *ops = info->fbcon_par; + + while (count--) { + unsigned short *start = s; + unsigned short *le = advance_row(s, 1); + unsigned short c; + int x = 0; + + do { + c = scr_readw(s); + + if (c == scr_readw(d)) { + if (s > start) { + ops->bmove(vc, info, line + ycount, x, + line, x, 1, s-start); + x += s - start + 1; + start = s + 1; + } else { + x++; + start++; + } + } + + scr_writew(c, d); + console_conditional_schedule(); + s++; + d++; + } while (s < le); + if (s > start) + ops->bmove(vc, info, line + ycount, x, line, x, 1, + s-start); + console_conditional_schedule(); + if (ycount > 0) + line++; + else { + line--; + /* NOTE: We subtract two lines from these pointers */ + s -= vc->vc_size_row; + d -= vc->vc_size_row; + } + } +} + static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p, int line, int count, int offset) { @@ -1450,6 +1687,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_display *p = &fb_display[vc->vc_num]; + int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK; if (fbcon_is_inactive(vc, info)) return true; @@ -1466,32 +1704,249 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - fbcon_redraw(vc, p, t, b - t - count, - count * vc->vc_cols); - fbcon_clear(vc, b - count, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - (b - count)), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; + if 
(logo_shown >= 0) + goto redraw_up; + switch (p->scrollmode) { + case SCROLL_MOVE: + fbcon_redraw_blit(vc, info, p, t, b - t - count, + count); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + (b - count)), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + + case SCROLL_WRAP_MOVE: + if (b - t - count > 3 * vc->vc_rows >> 2) { + if (t > 0) + fbcon_bmove(vc, 0, 0, count, 0, t, + vc->vc_cols); + ywrap_up(vc, count); + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b - count, 0, b, 0, + vc->vc_rows - b, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t + count, 0, t, 0, + b - t - count, vc->vc_cols); + else + goto redraw_up; + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_REDRAW: + if ((p->yscroll + count <= + 2 * (p->vrows - vc->vc_rows)) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (t > 0) + fbcon_redraw_move(vc, p, 0, t, count); + ypan_up_redraw(vc, t, count); + if (vc->vc_rows - b > 0) + fbcon_redraw_move(vc, p, b, + vc->vc_rows - b, b); + } else + fbcon_redraw_move(vc, p, t + count, b - t - count, t); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_MOVE: + if ((p->yscroll + count <= + 2 * (p->vrows - vc->vc_rows)) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (t > 0) + fbcon_bmove(vc, 0, 0, count, 0, t, + vc->vc_cols); + ypan_up(vc, count); + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b - count, 0, b, 0, + vc->vc_rows - b, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t + count, 0, t, 0, + b - t - count, vc->vc_cols); + else + goto redraw_up; + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_REDRAW: + redraw_up: + fbcon_redraw(vc, p, t, b - t - count, + count * vc->vc_cols); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + (b - count)), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + } + break; case SM_DOWN: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - fbcon_redraw(vc, p, b - 1, b - t - count, - -count * vc->vc_cols); - fbcon_clear(vc, t, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - t), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; + if (logo_shown >= 0) + goto redraw_down; + switch (p->scrollmode) { + case SCROLL_MOVE: + fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, + -count); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + t), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + + case SCROLL_WRAP_MOVE: + if (b - t - count > 3 * vc->vc_rows >> 2) { + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b, 0, b - count, 0, + vc->vc_rows - b, + vc->vc_cols); + ywrap_down(vc, count); + if (t > 0) + fbcon_bmove(vc, count, 0, 0, 0, t, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t, 0, t + count, 0, + b - t - count, vc->vc_cols); + else + goto redraw_down; + fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_MOVE: + if ((count - p->yscroll <= p->vrows - vc->vc_rows) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && 
(b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b, 0, b - count, 0, + vc->vc_rows - b, + vc->vc_cols); + ypan_down(vc, count); + if (t > 0) + fbcon_bmove(vc, count, 0, 0, 0, t, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t, 0, t + count, 0, + b - t - count, vc->vc_cols); + else + goto redraw_down; + fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_REDRAW: + if ((count - p->yscroll <= p->vrows - vc->vc_rows) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (vc->vc_rows - b > 0) + fbcon_redraw_move(vc, p, b, vc->vc_rows - b, + b - count); + ypan_down_redraw(vc, t, count); + if (t > 0) + fbcon_redraw_move(vc, p, count, t, 0); + } else + fbcon_redraw_move(vc, p, t, b - t - count, t + count); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_REDRAW: + redraw_down: + fbcon_redraw(vc, p, b - 1, b - t - count, + -count * vc->vc_cols); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + t), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + } } return false; } + +static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, + int height, int width) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + if (fbcon_is_inactive(vc, info)) + return; + + if (!width || !height) + return; + + /* Split blits that cross physical y_wrap case. + * Pathological case involves 4 blits, better to use recursive + * code rather than unrolled case + * + * Recursive invocations don't need to erase the cursor over and + * over again, so we use fbcon_bmove_rec() + */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width, + p->vrows - p->yscroll); +} + +static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, + int dy, int dx, int height, int width, u_int y_break) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + u_int b; + + if (sy < y_break && sy + height > y_break) { + b = y_break - sy; + if (dy < sy) { /* Avoid trashing self */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + } else { + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + } + return; + } + + if (dy < y_break && dy + height > y_break) { + b = y_break - dy; + if (dy < sy) { /* Avoid trashing self */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + } else { + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + } + return; + } + ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, + height, width); +} + static void updatescrollmode(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) @@ -1664,7 +2119,21 @@ static int fbcon_switch(struct vc_data *vc) updatescrollmode(p, info, vc); - scrollback_phys_max = 0; + switch (p->scrollmode) { + case SCROLL_WRAP_MOVE: + scrollback_phys_max = p->vrows - vc->vc_rows; + break; + case SCROLL_PAN_MOVE: + case SCROLL_PAN_REDRAW: + scrollback_phys_max = p->vrows - 2 * vc->vc_rows; 
+ if (scrollback_phys_max < 0) + scrollback_phys_max = 0; + break; + default: + scrollback_phys_max = 0; + break; + } + scrollback_max = 0; scrollback_current = 0; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index a00603b4451a..5246d0f2574b 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -29,6 +29,7 @@ struct fbcon_display { /* Filled in by the low-level console driver */ const u_char *fontdata; int userfont; /* != 0 if fontdata kmalloc()ed */ + u_short scrollmode; /* Scroll Method */ u_short inverse; /* != 0 text black on white as default */ short yscroll; /* Hardware scrolling */ int vrows; /* number of virtual rows */ @@ -51,6 +52,8 @@ struct fbcon_display { }; struct fbcon_ops { + void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width); void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width); void (*putcs)(struct vc_data *vc, struct fb_info *info, @@ -149,6 +152,62 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, #define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) #define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) + /* + * Scroll Method + */ + +/* There are several methods fbcon can use to move text around the screen: + * + * Operation Pan Wrap + *--------------------------------------------- + * SCROLL_MOVE copyarea No No + * SCROLL_PAN_MOVE copyarea Yes No + * SCROLL_WRAP_MOVE copyarea No Yes + * SCROLL_REDRAW imageblit No No + * SCROLL_PAN_REDRAW imageblit Yes No + * SCROLL_WRAP_REDRAW imageblit No Yes + * + * (SCROLL_WRAP_REDRAW is not implemented yet) + * + * In general, fbcon will choose the best scrolling + * method based on the rule below: + * + * Pan/Wrap > accel imageblit > accel copyarea > + * soft imageblit > (soft copyarea) + * + * Exception to the rule: Pan + accel copyarea is + * preferred over Pan + accel imageblit. + * + * The above is typical for PCI/AGP cards. Unless + * overridden, fbcon will never use soft copyarea. + * + * If you need to override the above rule, set the + * appropriate flags in fb_info->flags. For example, + * to prefer copyarea over imageblit, set + * FBINFO_READS_FAST. + * + * Other notes: + * + use the hardware engine to move the text + * (hw-accelerated copyarea() and fillrect()) + * + use hardware-supported panning on a large virtual screen + * + amifb can not only pan, but also wrap the display by N lines + * (i.e. visible line i = physical line (i+N) % yres). 
+ * + read what's already rendered on the screen and + * write it in a different place (this is cfb_copyarea()) + * + re-render the text to the screen + * + * Whether to use wrapping or panning can only be figured out at + * runtime (when we know whether our font height is a multiple + * of the pan/wrap step) + * + */ + +#define SCROLL_MOVE 0x001 +#define SCROLL_PAN_MOVE 0x002 +#define SCROLL_WRAP_MOVE 0x003 +#define SCROLL_REDRAW 0x004 +#define SCROLL_PAN_REDRAW 0x005 + #ifdef CONFIG_FB_TILEBLITTING extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); #endif diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index ffa78936eaab..9cd2c4b05c32 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -59,12 +59,31 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute, } } + +static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct fb_copyarea area; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + + area.sx = sy * vc->vc_font.height; + area.sy = vyres - ((sx + width) * vc->vc_font.width); + area.dx = dy * vc->vc_font.height; + area.dy = vyres - ((dx + width) * vc->vc_font.width); + area.width = height * vc->vc_font.height; + area.height = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { + struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = sy * vc->vc_font.height; @@ -121,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -210,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -368,7 +387,7 @@ static int ccw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; u32 yoffset; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); int err; yoffset = (vyres - info->var.yres) - ops->var.xoffset; @@ -383,6 +402,7 @@ static int ccw_update_start(struct fb_info *info) void fbcon_rotate_ccw(struct fbcon_ops *ops) { + ops->bmove = ccw_bmove; ops->clear = ccw_clear; ops->putcs = ccw_putcs; ops->clear_margins = ccw_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 92e5b7fb51ee..88d89fad3f05 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -44,12 +44,31 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute, } } + +static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct fb_copyarea area; + u32 vxres = GETVXRES(ops->p->scrollmode, info); + + area.sx = vxres - ((sy + height) * vc->vc_font.height); + area.sy = sx * 
vc->vc_font.width; + area.dx = vxres - ((dy + height) * vc->vc_font.height); + area.dy = dx * vc->vc_font.width; + area.width = height * vc->vc_font.height; + area.height = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { + struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p->scrollmode, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = vxres - ((sy + height) * vc->vc_font.height); @@ -106,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -193,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -350,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, static int cw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p->scrollmode, info); u32 xoffset; int err; @@ -366,6 +385,7 @@ static int cw_update_start(struct fb_info *info) void fbcon_rotate_cw(struct fbcon_ops *ops) { + ops->bmove = cw_bmove; ops->clear = cw_clear; ops->putcs = cw_putcs; ops->clear_margins = cw_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h index b528b2e54283..e233444cda66 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.h +++ b/drivers/video/fbdev/core/fbcon_rotate.h @@ -11,6 +11,15 @@ #ifndef _FBCON_ROTATE_H #define _FBCON_ROTATE_H +#define GETVYRES(s,i) ({ \ + (s == SCROLL_REDRAW || s == SCROLL_MOVE) ? \ + (i)->var.yres : (i)->var.yres_virtual; }) + +#define GETVXRES(s,i) ({ \ + (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? 
\ + (i)->var.xres : (i)->var.xres_virtual; }) + + static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat) { u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 09619bd8e021..8d5e66b1bdfb 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -44,13 +44,33 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute, } } + +static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct fb_copyarea area; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); + + area.sy = vyres - ((sy + height) * vc->vc_font.height); + area.sx = vxres - ((sx + width) * vc->vc_font.width); + area.dy = vyres - ((dy + height) * vc->vc_font.height); + area.dx = vxres - ((dx + width) * vc->vc_font.width); + area.height = height * vc->vc_font.height; + area.width = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { + struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dy = vyres - ((sy + height) * vc->vc_font.height); @@ -142,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, u32 mod = vc->vc_font.width % 8, cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -239,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -390,8 +410,8 @@ static int ud_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; int xoffset, yoffset; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); int err; xoffset = vxres - info->var.xres - ops->var.xoffset; @@ -409,6 +429,7 @@ static int ud_update_start(struct fb_info *info) void fbcon_rotate_ud(struct fbcon_ops *ops) { + ops->bmove = ud_bmove; ops->clear = ud_clear; ops->putcs = ud_putcs; ops->clear_margins = ud_clear_margins; diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 72af95053bcb..2768eff247ba 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -16,6 +16,21 @@ #include #include "fbcon.h" +static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fb_tilearea area; + + area.sx = sx; + area.sy = sy; + area.dx = dx; + area.dy = dy; + area.height = height; + area.width = width; + + info->tileops->fb_tilecopy(info, &area); +} + 
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { @@ -118,6 +133,7 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info) struct fb_tilemap map; struct fbcon_ops *ops = info->fbcon_par; + ops->bmove = tile_bmove; ops->clear = tile_clear; ops->putcs = tile_putcs; ops->clear_margins = tile_clear_margins; diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c index 0fe922f726e9..bcacfb6934fa 100644 --- a/drivers/video/fbdev/skeletonfb.c +++ b/drivers/video/fbdev/skeletonfb.c @@ -505,15 +505,15 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region) } /** - * xxxfb_copyarea - OBSOLETE function. + * xxxfb_copyarea - REQUIRED function. Can use generic routines if + * non acclerated hardware and packed pixel based. * Copies one area of the screen to another area. - * Will be deleted in a future version * * @info: frame buffer structure that represents a single frame buffer * @area: Structure providing the data to copy the framebuffer contents * from one region to another. * - * This drawing operation copied a rectangular area from one area of the + * This drawing operation copies a rectangular area from one area of the * screen to another area. */ void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) @@ -645,9 +645,9 @@ static const struct fb_ops xxxfb_ops = { .fb_setcolreg = xxxfb_setcolreg, .fb_blank = xxxfb_blank, .fb_pan_display = xxxfb_pan_display, - .fb_fillrect = xxxfb_fillrect, /* Needed !!! */ - .fb_copyarea = xxxfb_copyarea, /* Obsolete */ - .fb_imageblit = xxxfb_imageblit, /* Needed !!! */ + .fb_fillrect = xxxfb_fillrect, /* Needed !!! */ + .fb_copyarea = xxxfb_copyarea, /* Needed !!! */ + .fb_imageblit = xxxfb_imageblit, /* Needed !!! */ .fb_cursor = xxxfb_cursor, /* Optional !!! */ .fb_sync = xxxfb_sync, .fb_ioctl = xxxfb_ioctl, diff --git a/include/linux/fb.h b/include/linux/fb.h index 3da95842b207..02f362c661c8 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -262,7 +262,7 @@ struct fb_ops { /* Draws a rectangle */ void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect); - /* Copy data from area to another. Obsolete. */ + /* Copy data from area to another */ void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region); /* Draws a image to the display */ void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image); -- cgit v1.2.3-71-gd317 From 3ec762fb13c7e7273800b94c80db1c2cc37590d1 Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Wed, 2 Feb 2022 01:03:23 +0100 Subject: net: dsa: tag_qca: move define to include linux/dsa Move tag_qca define to include dir linux/dsa as the qca8k require access to the tagger define to support in-band mdio read/write using ethernet packet. Signed-off-by: Ansuel Smith Reviewed-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/linux/dsa/tag_qca.h | 21 +++++++++++++++++++++ net/dsa/tag_qca.c | 16 +--------------- 2 files changed, 22 insertions(+), 15 deletions(-) create mode 100644 include/linux/dsa/tag_qca.h (limited to 'include/linux') diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h new file mode 100644 index 000000000000..c02d2d39ff4a --- /dev/null +++ b/include/linux/dsa/tag_qca.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __TAG_QCA_H +#define __TAG_QCA_H + +#define QCA_HDR_LEN 2 +#define QCA_HDR_VERSION 0x2 + +#define QCA_HDR_RECV_VERSION GENMASK(15, 14) +#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11) +#define QCA_HDR_RECV_TYPE GENMASK(10, 6) +#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) +#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) + +#define QCA_HDR_XMIT_VERSION GENMASK(15, 14) +#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) +#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) +#define QCA_HDR_XMIT_FROM_CPU BIT(7) +#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) + +#endif /* __TAG_QCA_H */ diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index 55fa6b96b4eb..34e565e00ece 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -5,24 +5,10 @@ #include #include +#include #include "dsa_priv.h" -#define QCA_HDR_LEN 2 -#define QCA_HDR_VERSION 0x2 - -#define QCA_HDR_RECV_VERSION GENMASK(15, 14) -#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11) -#define QCA_HDR_RECV_TYPE GENMASK(10, 6) -#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) -#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) - -#define QCA_HDR_XMIT_VERSION GENMASK(15, 14) -#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) -#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) -#define QCA_HDR_XMIT_FROM_CPU BIT(7) -#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) - static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); -- cgit v1.2.3-71-gd317 From c2ee8181fddb293d296477f60b3eb4fa3ce4e1a6 Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Wed, 2 Feb 2022 01:03:25 +0100 Subject: net: dsa: tag_qca: add define for handling mgmt Ethernet packet Add all the defines required to prepare support for mgmt read/write in Ethernet packets. Any packet of this type has to be dropped, as the only use of these special packets is to receive the ack for an mgmt write request or the data for an mgmt read request. A struct is used that emulates the Ethernet header but is used for a different purpose. Signed-off-by: Ansuel Smith Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/dsa/tag_qca.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ net/dsa/tag_qca.c | 15 ++++++++++++--- 2 files changed, 56 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h index c02d2d39ff4a..f366422ab7a0 100644 --- a/include/linux/dsa/tag_qca.h +++ b/include/linux/dsa/tag_qca.h @@ -12,10 +12,54 @@ #define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) #define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) +/* Packet type for recv */ +#define QCA_HDR_RECV_TYPE_NORMAL 0x0 +#define QCA_HDR_RECV_TYPE_MIB 0x1 +#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2 + #define QCA_HDR_XMIT_VERSION GENMASK(15, 14) #define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) #define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) #define QCA_HDR_XMIT_FROM_CPU BIT(7) #define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) +/* Packet type for xmit */ +#define QCA_HDR_XMIT_TYPE_NORMAL 0x0 +#define QCA_HDR_XMIT_TYPE_RW_REG 0x1 + +/* Check code for a valid mgmt packet.
Switch will ignore the packet + * with this wrong. */ +#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5 + +/* Specific define for in-band MDIO read/write with Ethernet packet */ +#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 byte for the seq */ +#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 byte for the command */ +#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 byte for the mdio data */ +#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \ + QCA_HDR_MGMT_COMMAND_LEN + \ + QCA_HDR_MGMT_DATA1_LEN) + +#define QCA_HDR_MGMT_DATA2_LEN 12 /* Other 12 byte for the mdio data */ +#define QCA_HDR_MGMT_PADDING_LEN 34 /* Padding to reach the min Ethernet packet */ + +#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \ + QCA_HDR_LEN + \ + QCA_HDR_MGMT_DATA2_LEN + \ + QCA_HDR_MGMT_PADDING_LEN) + +#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */ +#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */ +#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */ +#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */ +#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */ + +/* Special struct emulating a Ethernet header */ +struct qca_mgmt_ethhdr { + u32 command; /* command bit 31:0 */ + u32 seq; /* seq 63:32 */ + u32 mdio_data; /* first 4byte mdio */ + __be16 hdr; /* qca hdr */ +} __packed; + #endif /* __TAG_QCA_H */ diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index f8df49d5956f..f17ed5be20c2 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -32,10 +32,12 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) { - u8 ver; - u16 hdr; - int port; + u8 ver, pk_type; __be16 *phdr; + int port; + u16 hdr; + + BUILD_BUG_ON(sizeof(struct qca_mgmt_ethhdr) != QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN))) return NULL; @@ -48,6 +50,13 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) if (unlikely(ver != QCA_HDR_VERSION)) return NULL; + /* Get pk type */ + pk_type = FIELD_GET(QCA_HDR_RECV_TYPE, hdr); + + /* Ethernet MDIO read/write packet */ + if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) + return NULL; + /* Remove QCA tag and recalculate checksum */ skb_pull_rcsum(skb, QCA_HDR_LEN); dsa_strip_etype_header(skb, QCA_HDR_LEN); -- cgit v1.2.3-71-gd317 From 18be654a4345f7d937b4bfbad74bea8093e3a93c Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Wed, 2 Feb 2022 01:03:26 +0100 Subject: net: dsa: tag_qca: add define for handling MIB packet Add a struct to correctly parse a MIB Ethernet packet. Signed-off-by: Ansuel Smith Reviewed-by: Florian Fainelli Signed-off-by: David S.
Miller --- include/linux/dsa/tag_qca.h | 10 ++++++++++ net/dsa/tag_qca.c | 4 ++++ 2 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h index f366422ab7a0..1fff57f2937b 100644 --- a/include/linux/dsa/tag_qca.h +++ b/include/linux/dsa/tag_qca.h @@ -62,4 +62,14 @@ struct qca_mgmt_ethhdr { __be16 hdr; /* qca hdr */ } __packed; +enum mdio_cmd { + MDIO_WRITE = 0x0, + MDIO_READ +}; + +struct mib_ethhdr { + u32 data[3]; /* first 3 mib counter */ + __be16 hdr; /* qca hdr */ +} __packed; + #endif /* __TAG_QCA_H */ diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index f17ed5be20c2..be792cf687d9 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -57,6 +57,10 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) return NULL; + /* Ethernet MIB counter packet */ + if (pk_type == QCA_HDR_RECV_TYPE_MIB) + return NULL; + /* Remove QCA tag and recalculate checksum */ skb_pull_rcsum(skb, QCA_HDR_LEN); dsa_strip_etype_header(skb, QCA_HDR_LEN); -- cgit v1.2.3-71-gd317 From 31eb6b4386ad91930417e3f5c8157a4b5e31cbd5 Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Wed, 2 Feb 2022 01:03:27 +0100 Subject: net: dsa: tag_qca: add support for handling mgmt and MIB Ethernet packet Add connect/disconnect helpers to assign the private struct to the DSA switch. Add support for Ethernet mgmt and MIB packets if the DSA driver provides a handler to correctly parse and process the data. Signed-off-by: Ansuel Smith Reviewed-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/dsa/tag_qca.h | 7 +++++++ net/dsa/tag_qca.c | 39 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 43 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h index 1fff57f2937b..4359fb0221cf 100644 --- a/include/linux/dsa/tag_qca.h +++ b/include/linux/dsa/tag_qca.h @@ -72,4 +72,11 @@ struct mib_ethhdr { __be16 hdr; /* qca hdr */ } __packed; +struct qca_tagger_data { + void (*rw_reg_ack_handler)(struct dsa_switch *ds, + struct sk_buff *skb); + void (*mib_autocast_handler)(struct dsa_switch *ds, + struct sk_buff *skb); +}; + #endif /* __TAG_QCA_H */ diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index be792cf687d9..57d2e00f1e5d 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -5,6 +5,7 @@ #include #include +#include #include #include "dsa_priv.h" @@ -32,6 +33,9 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) { + struct qca_tagger_data *tagger_data; + struct dsa_port *dp = dev->dsa_ptr; + struct dsa_switch *ds = dp->ds; u8 ver, pk_type; __be16 *phdr; int port; @@ -39,6 +43,8 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) BUILD_BUG_ON(sizeof(struct qca_mgmt_ethhdr) != QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); + tagger_data = ds->tagger_data; + if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN))) return NULL; @@ -53,13 +59,19 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) /* Get pk type */ pk_type = FIELD_GET(QCA_HDR_RECV_TYPE, hdr); - /* Ethernet MDIO read/write packet */ - if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) + /* Ethernet mgmt read/write packet */ + if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) { + if (likely(tagger_data->rw_reg_ack_handler)) + tagger_data->rw_reg_ack_handler(ds,
skb); return NULL; + } /* Ethernet MIB counter packet */ - if (pk_type == QCA_HDR_RECV_TYPE_MIB) + if (pk_type == QCA_HDR_RECV_TYPE_MIB) { + if (likely(tagger_data->mib_autocast_handler)) + tagger_data->mib_autocast_handler(ds, skb); return NULL; + } /* Remove QCA tag and recalculate checksum */ skb_pull_rcsum(skb, QCA_HDR_LEN); @@ -75,9 +87,30 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) return skb; } +static int qca_tag_connect(struct dsa_switch *ds) +{ + struct qca_tagger_data *tagger_data; + + tagger_data = kzalloc(sizeof(*tagger_data), GFP_KERNEL); + if (!tagger_data) + return -ENOMEM; + + ds->tagger_data = tagger_data; + + return 0; +} + +static void qca_tag_disconnect(struct dsa_switch *ds) +{ + kfree(ds->tagger_data); + ds->tagger_data = NULL; +} + static const struct dsa_device_ops qca_netdev_ops = { .name = "qca", .proto = DSA_TAG_PROTO_QCA, + .connect = qca_tag_connect, + .disconnect = qca_tag_disconnect, .xmit = qca_tag_xmit, .rcv = qca_tag_rcv, .needed_headroom = QCA_HDR_LEN, -- cgit v1.2.3-71-gd317 From 926597ffce0e3e2f785475df18e1636194209910 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jan 2022 10:39:11 +0100 Subject: block: move disk_{block,unblock,flush}_events to blk.h No need to have these declarations in a public header. Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20220124093913.742411-2-hch@lst.de Signed-off-by: Jens Axboe --- block/blk.h | 3 +++ include/linux/genhd.h | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/block/blk.h b/block/blk.h index 8bd43b3ad33d..2cba50d7e6cb 100644 --- a/block/blk.h +++ b/block/blk.h @@ -445,6 +445,9 @@ int disk_alloc_events(struct gendisk *disk); void disk_add_events(struct gendisk *disk); void disk_del_events(struct gendisk *disk); void disk_release_events(struct gendisk *disk); +void disk_block_events(struct gendisk *disk); +void disk_unblock_events(struct gendisk *disk); +void disk_flush_events(struct gendisk *disk, unsigned int mask); extern struct device_attribute dev_attr_events; extern struct device_attribute dev_attr_events_async; extern struct device_attribute dev_attr_events_poll_msecs; diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 6906a45bc761..504f9a6674ac 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -185,9 +185,6 @@ static inline int bdev_read_only(struct block_device *bdev) return bdev->bd_read_only || get_disk_ro(bdev->bd_disk); } -extern void disk_block_events(struct gendisk *disk); -extern void disk_unblock_events(struct gendisk *disk); -extern void disk_flush_events(struct gendisk *disk, unsigned int mask); bool set_capacity_and_notify(struct gendisk *disk, sector_t size); bool disk_force_media_change(struct gendisk *disk, unsigned int events); -- cgit v1.2.3-71-gd317 From e7243285c0fc87054990fcde630583586ff8ed5f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jan 2022 10:39:12 +0100 Subject: block: move blk_drop_partitions to blk.h No need to have this declaration in a public header. Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Reviewed-by: Martin K. 
Petersen Link: https://lore.kernel.org/r/20220124093913.742411-3-hch@lst.de Signed-off-by: Jens Axboe --- block/blk.h | 1 + include/linux/genhd.h | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/block/blk.h b/block/blk.h index 2cba50d7e6cb..800c5ae387a0 100644 --- a/block/blk.h +++ b/block/blk.h @@ -426,6 +426,7 @@ int bdev_add_partition(struct gendisk *disk, int partno, sector_t start, int bdev_del_partition(struct gendisk *disk, int partno); int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start, sector_t length); +void blk_drop_partitions(struct gendisk *disk); int bio_add_hw_page(struct request_queue *q, struct bio *bio, struct page *page, unsigned int len, unsigned int offset, diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 504f9a6674ac..aa4bd985dbe5 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -219,7 +219,6 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb) } int bdev_disk_changed(struct gendisk *disk, bool invalidate); -void blk_drop_partitions(struct gendisk *disk); struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, struct lock_class_key *lkclass); -- cgit v1.2.3-71-gd317 From 322cbb50de711814c42fb088f6d31901502c711a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jan 2022 10:39:13 +0100 Subject: block: remove genhd.h There is no good reason to keep genhd.h separate from the main blkdev.h header that includes it. So fold the contents of genhd.h into blkdev.h and remove genhd.h entirely. Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20220124093913.742411-4-hch@lst.de Signed-off-by: Jens Axboe --- Documentation/block/capability.rst | 2 +- arch/m68k/atari/stdma.c | 1 - arch/m68k/bvme6000/config.c | 1 - arch/m68k/emu/nfblock.c | 1 - arch/m68k/kernel/setup_mm.c | 1 - arch/m68k/mvme147/config.c | 1 - arch/m68k/mvme16x/config.c | 1 - block/blk-cgroup.c | 1 - block/disk-events.c | 2 +- block/genhd.c | 1 - block/holder.c | 2 +- block/partitions/check.h | 1 - block/partitions/core.c | 1 - block/partitions/efi.h | 1 - block/partitions/ldm.h | 1 - block/sed-opal.c | 2 +- drivers/base/class.c | 2 +- drivers/base/core.c | 2 +- drivers/base/devtmpfs.c | 2 +- drivers/block/aoe/aoeblk.c | 1 - drivers/block/aoe/aoecmd.c | 1 - drivers/block/drbd/drbd_int.h | 1 - drivers/block/mtip32xx/mtip32xx.c | 1 - drivers/block/mtip32xx/mtip32xx.h | 1 - drivers/block/rnbd/rnbd-srv-sysfs.c | 1 - drivers/block/sunvdc.c | 1 - drivers/block/zram/zram_drv.c | 1 - drivers/cdrom/gdrom.c | 1 - drivers/char/random.c | 2 +- drivers/md/bcache/super.c | 1 - drivers/md/dm-core.h | 1 - drivers/mtd/mtdswap.c | 2 +- drivers/mtd/nand/raw/sharpsl.c | 1 - drivers/nvdimm/blk.c | 1 - drivers/nvdimm/btt.c | 1 - drivers/nvdimm/btt_devs.c | 1 - drivers/nvdimm/bus.c | 1 - drivers/nvdimm/pfn_devs.c | 1 - drivers/s390/block/dasd_int.h | 1 - drivers/s390/block/scm_blk.c | 1 - drivers/s390/block/scm_blk.h | 1 - drivers/scsi/scsi_debug.c | 1 - drivers/scsi/scsicam.c | 1 - drivers/scsi/sd.c | 1 - drivers/scsi/sr.h | 1 - drivers/target/target_core_iblock.c | 1 - drivers/target/target_core_pscsi.c | 1 - fs/btrfs/check-integrity.c | 1 - fs/dax.c | 1 - fs/gfs2/sys.c | 2 +- fs/hfs/mdb.c | 2 +- fs/hfsplus/wrapper.c | 1 - fs/ksmbd/vfs.c | 1 - fs/nfs/blocklayout/rpc_pipefs.c | 1 - fs/nfsd/blocklayout.c | 1 - include/linux/blkdev.h | 273 +++++++++++++++++++++++++++++++++- include/linux/genhd.h | 287 
------------------------------------ include/linux/part_stat.h | 2 +- init/do_mounts.c | 1 - kernel/power/hibernate.c | 1 - kernel/power/swap.c | 1 - security/integrity/ima/ima_policy.c | 1 - 62 files changed, 282 insertions(+), 350 deletions(-) delete mode 100644 include/linux/genhd.h (limited to 'include/linux') diff --git a/Documentation/block/capability.rst b/Documentation/block/capability.rst index 160a5148b915..2ae7f064736a 100644 --- a/Documentation/block/capability.rst +++ b/Documentation/block/capability.rst @@ -7,4 +7,4 @@ This file documents the sysfs file ``block//capability``. ``capability`` is a bitfield, printed in hexadecimal, indicating which capabilities a specific block device supports: -.. kernel-doc:: include/linux/genhd.h +.. kernel-doc:: include/linux/blkdev.h diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c index ba65f942d0c7..ce6818eff75e 100644 --- a/arch/m68k/atari/stdma.c +++ b/arch/m68k/atari/stdma.c @@ -30,7 +30,6 @@ #include #include -#include #include #include #include diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 0c6feafbbd11..0fe0f3e888fb 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 9c57b245dc12..267b02cc5655 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index 49e573b94326..ee268055bdce 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index dfd6202fd403..db1430dc411f 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index b4422c2dfbbf..45a07ab3123a 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 650f7e27989f..671debbae941 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/block/disk-events.c b/block/disk-events.c index 8d5496e7592a..aee25a7e1ab7 100644 --- a/block/disk-events.c +++ b/block/disk-events.c @@ -4,7 +4,7 @@ */ #include #include -#include +#include #include "blk.h" struct disk_events { diff --git a/block/genhd.c b/block/genhd.c index 6ae990ff0266..9589d1d59afa 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include diff --git a/block/holder.c b/block/holder.c index 27cddce1b446..8d750281a1cd 100644 --- a/block/holder.c +++ b/block/holder.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -#include +#include #include struct bd_holder_disk { diff --git a/block/partitions/check.h b/block/partitions/check.h index d5b28e309d64..4ffa2359b1a3 100644 --- a/block/partitions/check.h +++ b/block/partitions/check.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include #include -#include #include "../blk.h" /* diff --git a/block/partitions/core.c b/block/partitions/core.c index 
c2a1635922b1..2ef8dfa1e5c8 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include diff --git a/block/partitions/efi.h b/block/partitions/efi.h index 8cc2b88d0aa8..84b9f36b9e47 100644 --- a/block/partitions/efi.h +++ b/block/partitions/efi.h @@ -13,7 +13,6 @@ #include #include -#include #include #include #include diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h index 8693704dcf5e..0a747a0c782d 100644 --- a/block/partitions/ldm.h +++ b/block/partitions/ldm.h @@ -14,7 +14,6 @@ #include #include -#include #include #include #include diff --git a/block/sed-opal.c b/block/sed-opal.c index daafadbb88ca..9700197000f2 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/base/class.c b/drivers/base/class.c index 7476f393df97..8feb85e186e3 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include "base.h" diff --git a/drivers/base/core.c b/drivers/base/core.c index 7bb957b11861..3d6430eb0c6a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index f41063ac1aee..db5a03a0618e 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 52484bcdedb9..8a91fcac6f82 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 6af111f568e4..cc11f89a0928 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index f27d5b0f9a0b..acb1ad3c0603 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index e6005c232328..cba956881d55 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 88f4206310e4..6816beb45352 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -15,7 +15,6 @@ #include #include #include -#include /* Offset of Subsystem Device ID in pci confoguration space */ #define PCI_SUBSYSTEM_DEVICEID 0x2E diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c index 4db98e0e76f0..feaa76c5a342 100644 --- a/drivers/block/rnbd/rnbd-srv-sysfs.c +++ b/drivers/block/rnbd/rnbd-srv-sysfs.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 146d85d80e0e..dd0a1a6fed29 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include 
#include diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index cb253d80d72b..342dbcb3f220 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index faead41709bc..8e78b37d0f6a 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/char/random.c b/drivers/char/random.c index 68613f0b6887..f206c87c6202 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -330,7 +330,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 140f35dc0c45..c31a62b963f0 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index b855fef4f38a..72d18c3fbf1f 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -11,7 +11,6 @@ #include #include -#include #include #include diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index e86b04bc1d6b..dc7f1532a37f 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index 5612ee628425..52ce5162538a 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -6,7 +6,6 @@ * Based on Sharp's NAND driver sharp_sl.c */ -#include #include #include #include diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 228c33b8d1d6..c1db43524d75 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -6,7 +6,6 @@ #include #include -#include #include #include #include diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index da3f007a1211..cbd994f7f1fe 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 8b52e5144f08..e5a58520d398 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -4,7 +4,6 @@ */ #include #include -#include #include #include #include diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 9dc7f3edd42b..5bbe31b08581 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 58eda16f5c53..c31e184bfa45 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 8b458010f88a..3b7af00a7825 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 61ecdcb2cc6a..2a9c0ddcade5 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/s390/block/scm_blk.h 
b/drivers/s390/block/scm_blk.h index a05a4297cfae..af82b3214774 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 2104973a35cd..911cc72dd7ac 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c index 0ffdb8f2995f..acdc0aceca5e 100644 --- a/drivers/scsi/scsicam.c +++ b/drivers/scsi/scsicam.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 62eb9921cc94..2d648d27bfd7 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index 339c624e04d8..1609f02ed29a 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -18,7 +18,6 @@ #ifndef _SR_H #define _SR_H -#include #include #include diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index bf8ae4825a06..6045678365a5 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 807d06ecadee..0fae71ac5cc8 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 7e9f90fa0388..abac86a75840 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -78,7 +78,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/dax.c b/fs/dax.c index cd03485867a7..ab0978739eaa 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index a6002b2d146d..d87ea98cf535 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include "gfs2.h" #include "incore.h" diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c index 5beb82652435..8082eb01127c 100644 --- a/fs/hfs/mdb.c +++ b/fs/hfs/mdb.c @@ -9,7 +9,7 @@ */ #include -#include +#include #include #include diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 51ae6f1eb4a5..4688cc7b3692 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include "hfsplus_fs.h" diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c index 19d36393974c..9cebb6ba555b 100644 --- a/fs/ksmbd/vfs.c +++ b/fs/ksmbd/vfs.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c index ef9db135c649..6c977288cc28 100644 --- a/fs/nfs/blocklayout/rpc_pipefs.c +++ b/fs/nfs/blocklayout/rpc_pipefs.c @@ -27,7 +27,6 @@ */ #include -#include #include #include "blocklayout.h" diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index e5c0982a381d..b6d01d51a746 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -4,7 +4,6 @@ */ #include #include -#include #include #include diff --git a/include/linux/blkdev.h 
b/include/linux/blkdev.h index f35aea98bc35..99a4384bb8a5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1,9 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * Portions Copyright (C) 1992 Drew Eckhardt + */ #ifndef _LINUX_BLKDEV_H #define _LINUX_BLKDEV_H -#include -#include +#include +#include +#include #include #include #include @@ -12,11 +16,15 @@ #include #include #include +#include #include #include #include +#include #include #include +#include +#include struct module; struct request_queue; @@ -33,6 +41,10 @@ struct blk_queue_stats; struct blk_stat_callback; struct blk_crypto_profile; +extern const struct device_type disk_type; +extern struct device_type part_type; +extern struct class block_class; + /* Must be consistent with blk_mq_poll_stats_bkt() */ #define BLK_MQ_POLL_STATS_BKTS 16 @@ -45,6 +57,144 @@ struct blk_crypto_profile; */ #define BLKCG_MAX_POLS 6 +#define DISK_MAX_PARTS 256 +#define DISK_NAME_LEN 32 + +#define PARTITION_META_INFO_VOLNAMELTH 64 +/* + * Enough for the string representation of any kind of UUID plus NULL. + * EFI UUID is 36 characters. MSDOS UUID is 11 characters. + */ +#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1) + +struct partition_meta_info { + char uuid[PARTITION_META_INFO_UUIDLTH]; + u8 volname[PARTITION_META_INFO_VOLNAMELTH]; +}; + +/** + * DOC: genhd capability flags + * + * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to + * removable media. When set, the device remains present even when media is not + * inserted. Shall not be set for devices which are removed entirely when the + * media is removed. + * + * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events, + * doesn't appear in sysfs, and can't be opened from userspace or using + * blkdev_get*. Used for the underlying components of multipath devices. + * + * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not + * scan for partitions from add_disk, and users can't add partitions manually. + * + */ +enum { + GENHD_FL_REMOVABLE = 1 << 0, + GENHD_FL_HIDDEN = 1 << 1, + GENHD_FL_NO_PART = 1 << 2, +}; + +enum { + DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ + DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */ +}; + +enum { + /* Poll even if events_poll_msecs is unset */ + DISK_EVENT_FLAG_POLL = 1 << 0, + /* Forward events to udev */ + DISK_EVENT_FLAG_UEVENT = 1 << 1, + /* Block event polling when open for exclusive write */ + DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2, +}; + +struct disk_events; +struct badblocks; + +struct blk_integrity { + const struct blk_integrity_profile *profile; + unsigned char flags; + unsigned char tuple_size; + unsigned char interval_exp; + unsigned char tag_size; +}; + +struct gendisk { + /* + * major/first_minor/minors should not be set by any new driver, the + * block core will take care of allocating them automatically. 
+ */ + int major; + int first_minor; + int minors; + + char disk_name[DISK_NAME_LEN]; /* name of major driver */ + + unsigned short events; /* supported events */ + unsigned short event_flags; /* flags related to event processing */ + + struct xarray part_tbl; + struct block_device *part0; + + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + + int flags; + unsigned long state; +#define GD_NEED_PART_SCAN 0 +#define GD_READ_ONLY 1 +#define GD_DEAD 2 +#define GD_NATIVE_CAPACITY 3 + + struct mutex open_mutex; /* open/close mutex */ + unsigned open_partitions; /* number of open partitions */ + + struct backing_dev_info *bdi; + struct kobject *slave_dir; +#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED + struct list_head slave_bdevs; +#endif + struct timer_rand_state *random; + atomic_t sync_io; /* RAID */ + struct disk_events *ev; +#ifdef CONFIG_BLK_DEV_INTEGRITY + struct kobject integrity_kobj; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ +#if IS_ENABLED(CONFIG_CDROM) + struct cdrom_device_info *cdi; +#endif + int node_id; + struct badblocks *bb; + struct lockdep_map lockdep_map; + u64 diskseq; +}; + +static inline bool disk_live(struct gendisk *disk) +{ + return !inode_unhashed(disk->part0->bd_inode); +} + +/* + * The gendisk is refcounted by the part0 block_device, and the bd_device + * therein is also used for device model presentation in sysfs. + */ +#define dev_to_disk(device) \ + (dev_to_bdev(device)->bd_disk) +#define disk_to_dev(disk) \ + (&((disk)->part0->bd_device)) + +#if IS_REACHABLE(CONFIG_CDROM) +#define disk_to_cdi(disk) ((disk)->cdi) +#else +#define disk_to_cdi(disk) NULL +#endif + +static inline dev_t disk_devt(struct gendisk *disk) +{ + return MKDEV(disk->major, disk->first_minor); +} + static inline int blk_validate_block_size(unsigned long bsize) { if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) @@ -596,6 +746,118 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) #define for_each_bio(_bio) \ for (; _bio; _bio = _bio->bi_next) +int __must_check device_add_disk(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups); +static inline int __must_check add_disk(struct gendisk *disk) +{ + return device_add_disk(NULL, disk, NULL); +} +void del_gendisk(struct gendisk *gp); +void invalidate_disk(struct gendisk *disk); +void set_disk_ro(struct gendisk *disk, bool read_only); +void disk_uevent(struct gendisk *disk, enum kobject_action action); + +static inline int get_disk_ro(struct gendisk *disk) +{ + return disk->part0->bd_read_only || + test_bit(GD_READ_ONLY, &disk->state); +} + +static inline int bdev_read_only(struct block_device *bdev) +{ + return bdev->bd_read_only || get_disk_ro(bdev->bd_disk); +} + +bool set_capacity_and_notify(struct gendisk *disk, sector_t size); +bool disk_force_media_change(struct gendisk *disk, unsigned int events); + +void add_disk_randomness(struct gendisk *disk) __latent_entropy; +void rand_initialize_disk(struct gendisk *disk); + +static inline sector_t get_start_sect(struct block_device *bdev) +{ + return bdev->bd_start_sect; +} + +static inline sector_t bdev_nr_sectors(struct block_device *bdev) +{ + return bdev->bd_nr_sectors; +} + +static inline loff_t bdev_nr_bytes(struct block_device *bdev) +{ + return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT; +} + +static inline sector_t get_capacity(struct gendisk *disk) +{ + return bdev_nr_sectors(disk->part0); +} + +static inline u64 sb_bdev_nr_blocks(struct super_block *sb) +{ + return 
bdev_nr_sectors(sb->s_bdev) >> + (sb->s_blocksize_bits - SECTOR_SHIFT); +} + +int bdev_disk_changed(struct gendisk *disk, bool invalidate); + +struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, + struct lock_class_key *lkclass); +void put_disk(struct gendisk *disk); +struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass); + +/** + * blk_alloc_disk - allocate a gendisk structure + * @node_id: numa node to allocate on + * + * Allocate and pre-initialize a gendisk structure for use with BIO based + * drivers. + * + * Context: can sleep + */ +#define blk_alloc_disk(node_id) \ +({ \ + static struct lock_class_key __key; \ + \ + __blk_alloc_disk(node_id, &__key); \ +}) +void blk_cleanup_disk(struct gendisk *disk); + +int __register_blkdev(unsigned int major, const char *name, + void (*probe)(dev_t devt)); +#define register_blkdev(major, name) \ + __register_blkdev(major, name, NULL) +void unregister_blkdev(unsigned int major, const char *name); + +bool bdev_check_media_change(struct block_device *bdev); +int __invalidate_device(struct block_device *bdev, bool kill_dirty); +void set_capacity(struct gendisk *disk, sector_t size); + +#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED +int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); +void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); +int bd_register_pending_holders(struct gendisk *disk); +#else +static inline int bd_link_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ + return 0; +} +static inline void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ +} +static inline int bd_register_pending_holders(struct gendisk *disk) +{ + return 0; +} +#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */ + +dev_t part_devt(struct gendisk *disk, u8 partno); +void inc_diskseq(struct gendisk *disk); +dev_t blk_lookup_devt(const char *name, int partno); +void blk_request_module(dev_t devt); extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); @@ -1311,6 +1573,7 @@ void invalidate_bdev(struct block_device *bdev); int sync_blockdev(struct block_device *bdev); int sync_blockdev_nowait(struct block_device *bdev); void sync_bdevs(bool wait); +void printk_all_partitions(void); #else static inline void invalidate_bdev(struct block_device *bdev) { @@ -1326,7 +1589,11 @@ static inline int sync_blockdev_nowait(struct block_device *bdev) static inline void sync_bdevs(bool wait) { } -#endif +static inline void printk_all_partitions(void) +{ +} +#endif /* CONFIG_BLOCK */ + int fsync_bdev(struct block_device *bdev); int freeze_bdev(struct block_device *bdev); diff --git a/include/linux/genhd.h b/include/linux/genhd.h deleted file mode 100644 index aa4bd985dbe5..000000000000 --- a/include/linux/genhd.h +++ /dev/null @@ -1,287 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_GENHD_H -#define _LINUX_GENHD_H - -/* - * genhd.h Copyright (C) 1992 Drew Eckhardt - * Generic hard disk header file by - * Drew Eckhardt - * - * - */ - -#include -#include -#include -#include -#include -#include - -extern const struct device_type disk_type; -extern struct device_type part_type; -extern struct class block_class; - -#define DISK_MAX_PARTS 256 -#define DISK_NAME_LEN 32 - -#define PARTITION_META_INFO_VOLNAMELTH 64 -/* - * Enough for the string representation of any kind of UUID plus NULL. - * EFI UUID is 36 characters. MSDOS UUID is 11 characters. 
- */ -#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1) - -struct partition_meta_info { - char uuid[PARTITION_META_INFO_UUIDLTH]; - u8 volname[PARTITION_META_INFO_VOLNAMELTH]; -}; - -/** - * DOC: genhd capability flags - * - * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to - * removable media. When set, the device remains present even when media is not - * inserted. Shall not be set for devices which are removed entirely when the - * media is removed. - * - * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events, - * doesn't appear in sysfs, and can't be opened from userspace or using - * blkdev_get*. Used for the underlying components of multipath devices. - * - * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not - * scan for partitions from add_disk, and users can't add partitions manually. - * - */ -enum { - GENHD_FL_REMOVABLE = 1 << 0, - GENHD_FL_HIDDEN = 1 << 1, - GENHD_FL_NO_PART = 1 << 2, -}; - -enum { - DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ - DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */ -}; - -enum { - /* Poll even if events_poll_msecs is unset */ - DISK_EVENT_FLAG_POLL = 1 << 0, - /* Forward events to udev */ - DISK_EVENT_FLAG_UEVENT = 1 << 1, - /* Block event polling when open for exclusive write */ - DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2, -}; - -struct disk_events; -struct badblocks; - -struct blk_integrity { - const struct blk_integrity_profile *profile; - unsigned char flags; - unsigned char tuple_size; - unsigned char interval_exp; - unsigned char tag_size; -}; - -struct gendisk { - /* - * major/first_minor/minors should not be set by any new driver, the - * block core will take care of allocating them automatically. - */ - int major; - int first_minor; - int minors; - - char disk_name[DISK_NAME_LEN]; /* name of major driver */ - - unsigned short events; /* supported events */ - unsigned short event_flags; /* flags related to event processing */ - - struct xarray part_tbl; - struct block_device *part0; - - const struct block_device_operations *fops; - struct request_queue *queue; - void *private_data; - - int flags; - unsigned long state; -#define GD_NEED_PART_SCAN 0 -#define GD_READ_ONLY 1 -#define GD_DEAD 2 -#define GD_NATIVE_CAPACITY 3 - - struct mutex open_mutex; /* open/close mutex */ - unsigned open_partitions; /* number of open partitions */ - - struct backing_dev_info *bdi; - struct kobject *slave_dir; -#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED - struct list_head slave_bdevs; -#endif - struct timer_rand_state *random; - atomic_t sync_io; /* RAID */ - struct disk_events *ev; -#ifdef CONFIG_BLK_DEV_INTEGRITY - struct kobject integrity_kobj; -#endif /* CONFIG_BLK_DEV_INTEGRITY */ -#if IS_ENABLED(CONFIG_CDROM) - struct cdrom_device_info *cdi; -#endif - int node_id; - struct badblocks *bb; - struct lockdep_map lockdep_map; - u64 diskseq; -}; - -static inline bool disk_live(struct gendisk *disk) -{ - return !inode_unhashed(disk->part0->bd_inode); -} - -/* - * The gendisk is refcounted by the part0 block_device, and the bd_device - * therein is also used for device model presentation in sysfs. 
- */ -#define dev_to_disk(device) \ - (dev_to_bdev(device)->bd_disk) -#define disk_to_dev(disk) \ - (&((disk)->part0->bd_device)) - -#if IS_REACHABLE(CONFIG_CDROM) -#define disk_to_cdi(disk) ((disk)->cdi) -#else -#define disk_to_cdi(disk) NULL -#endif - -static inline dev_t disk_devt(struct gendisk *disk) -{ - return MKDEV(disk->major, disk->first_minor); -} - -void disk_uevent(struct gendisk *disk, enum kobject_action action); - -/* block/genhd.c */ -int __must_check device_add_disk(struct device *parent, struct gendisk *disk, - const struct attribute_group **groups); -static inline int __must_check add_disk(struct gendisk *disk) -{ - return device_add_disk(NULL, disk, NULL); -} -extern void del_gendisk(struct gendisk *gp); - -void invalidate_disk(struct gendisk *disk); - -void set_disk_ro(struct gendisk *disk, bool read_only); - -static inline int get_disk_ro(struct gendisk *disk) -{ - return disk->part0->bd_read_only || - test_bit(GD_READ_ONLY, &disk->state); -} - -static inline int bdev_read_only(struct block_device *bdev) -{ - return bdev->bd_read_only || get_disk_ro(bdev->bd_disk); -} - -bool set_capacity_and_notify(struct gendisk *disk, sector_t size); -bool disk_force_media_change(struct gendisk *disk, unsigned int events); - -/* drivers/char/random.c */ -extern void add_disk_randomness(struct gendisk *disk) __latent_entropy; -extern void rand_initialize_disk(struct gendisk *disk); - -static inline sector_t get_start_sect(struct block_device *bdev) -{ - return bdev->bd_start_sect; -} - -static inline sector_t bdev_nr_sectors(struct block_device *bdev) -{ - return bdev->bd_nr_sectors; -} - -static inline loff_t bdev_nr_bytes(struct block_device *bdev) -{ - return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT; -} - -static inline sector_t get_capacity(struct gendisk *disk) -{ - return bdev_nr_sectors(disk->part0); -} - -static inline u64 sb_bdev_nr_blocks(struct super_block *sb) -{ - return bdev_nr_sectors(sb->s_bdev) >> - (sb->s_blocksize_bits - SECTOR_SHIFT); -} - -int bdev_disk_changed(struct gendisk *disk, bool invalidate); - -struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, - struct lock_class_key *lkclass); -extern void put_disk(struct gendisk *disk); -struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass); - -/** - * blk_alloc_disk - allocate a gendisk structure - * @node_id: numa node to allocate on - * - * Allocate and pre-initialize a gendisk structure for use with BIO based - * drivers. 
- * - * Context: can sleep - */ -#define blk_alloc_disk(node_id) \ -({ \ - static struct lock_class_key __key; \ - \ - __blk_alloc_disk(node_id, &__key); \ -}) -void blk_cleanup_disk(struct gendisk *disk); - -int __register_blkdev(unsigned int major, const char *name, - void (*probe)(dev_t devt)); -#define register_blkdev(major, name) \ - __register_blkdev(major, name, NULL) -void unregister_blkdev(unsigned int major, const char *name); - -bool bdev_check_media_change(struct block_device *bdev); -int __invalidate_device(struct block_device *bdev, bool kill_dirty); -void set_capacity(struct gendisk *disk, sector_t size); - -#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED -int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); -void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); -int bd_register_pending_holders(struct gendisk *disk); -#else -static inline int bd_link_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ - return 0; -} -static inline void bd_unlink_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ -} -static inline int bd_register_pending_holders(struct gendisk *disk) -{ - return 0; -} -#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */ - -dev_t part_devt(struct gendisk *disk, u8 partno); -void inc_diskseq(struct gendisk *disk); -dev_t blk_lookup_devt(const char *name, int partno); -void blk_request_module(dev_t devt); -#ifdef CONFIG_BLOCK -void printk_all_partitions(void); -#else /* CONFIG_BLOCK */ -static inline void printk_all_partitions(void) -{ -} -#endif /* CONFIG_BLOCK */ - -#endif /* _LINUX_GENHD_H */ diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index 6f7949b2fd8d..abeba356bc3f 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -2,7 +2,7 @@ #ifndef _LINUX_PART_STAT_H #define _LINUX_PART_STAT_H -#include +#include #include struct disk_stats { diff --git a/init/do_mounts.c b/init/do_mounts.c index 762b534978d9..7058e14ad5f7 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index e6af502c2fd7..a94044197c4a 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/kernel/power/swap.c b/kernel/power/swap.c index ad10359030a4..f1bd03129575 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 320ca80aacab..02882526ba9a 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3-71-gd317 From 0a3140ea0fae377c9eaa031b7db1670ae422ed47 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Mon, 24 Jan 2022 10:11:02 +0100 Subject: block: pass a block_device and opf to blk_next_bio All callers need to set the block_device and operation, so lift that into the common code. 
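To make the conversion concrete, a minimal sketch of a chained-bio loop using the new signature is shown below (zero_chain_sketch() and its simplistic length capping are hypothetical, assumed only for illustration; the blk_next_bio() signature is the one introduced by this patch):

static struct bio *zero_chain_sketch(struct block_device *bdev,
				     sector_t sector, sector_t nr_sects,
				     gfp_t gfp_mask)
{
	struct bio *bio = NULL;

	while (nr_sects) {
		/* hypothetical cap; real callers use queue-derived limits */
		sector_t len = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/* bdev and opf are now set centrally by blk_next_bio() */
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << 9;
		sector += len;
		nr_sects -= len;
	}

	return bio;	/* final bio is submitted/waited on by the caller */
}

Setting bdev and opf centrally removes the per-caller bio_set_dev()/bi_opf boilerplate that the hunks below delete.
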
Signed-off-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220124091107.642561-15-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 6 +++++- block/blk-lib.c | 19 +++++-------------- block/blk-zoned.c | 9 +++------ block/blk.h | 2 -- drivers/nvme/target/zns.c | 6 +++--- include/linux/bio.h | 3 ++- 6 files changed, 18 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index 1536579ed490..a0166f29a05c 100644 --- a/block/bio.c +++ b/block/bio.c @@ -344,10 +344,14 @@ void bio_chain(struct bio *bio, struct bio *parent) } EXPORT_SYMBOL(bio_chain); -struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp) +struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, + unsigned int nr_pages, unsigned int opf, gfp_t gfp) { struct bio *new = bio_alloc(gfp, nr_pages); + bio_set_dev(new, bdev); + new->bi_opf = opf; + if (bio) { bio_chain(bio, new); submit_bio(bio); diff --git a/block/blk-lib.c b/block/blk-lib.c index 9245b300ef73..1b8ced45e4e5 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -82,11 +82,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, WARN_ON_ONCE((req_sects << 9) > UINT_MAX); - bio = blk_next_bio(bio, 0, gfp_mask); + bio = blk_next_bio(bio, bdev, 0, op, gfp_mask); bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, bdev); - bio_set_op_attrs(bio, op, 0); - bio->bi_iter.bi_size = req_sects << 9; sector += req_sects; nr_sects -= req_sects; @@ -176,14 +173,12 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector, max_write_same_sectors = bio_allowed_max_sectors(q); while (nr_sects) { - bio = blk_next_bio(bio, 1, gfp_mask); + bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE_SAME, gfp_mask); bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, bdev); bio->bi_vcnt = 1; bio->bi_io_vec->bv_page = page; bio->bi_io_vec->bv_offset = 0; bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); - bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0); if (nr_sects > max_write_same_sectors) { bio->bi_iter.bi_size = max_write_same_sectors << 9; @@ -252,10 +247,8 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev, return -EOPNOTSUPP; while (nr_sects) { - bio = blk_next_bio(bio, 0, gfp_mask); + bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask); bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, bdev); - bio->bi_opf = REQ_OP_WRITE_ZEROES; if (flags & BLKDEV_ZERO_NOUNMAP) bio->bi_opf |= REQ_NOUNMAP; @@ -303,11 +296,9 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev, return -EPERM; while (nr_sects != 0) { - bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), - gfp_mask); + bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects), + REQ_OP_WRITE, gfp_mask); bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, bdev); - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); while (nr_sects != 0) { sz = min((sector_t) PAGE_SIZE, nr_sects << 9); diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 774ecc598bee..5ab755d792c8 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -215,9 +215,8 @@ static int blkdev_zone_reset_all_emulated(struct block_device *bdev, continue; } - bio = blk_next_bio(bio, 0, gfp_mask); - bio_set_dev(bio, bdev); - bio->bi_opf = REQ_OP_ZONE_RESET | REQ_SYNC; + bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC, + gfp_mask); bio->bi_iter.bi_sector = sector; sector += zone_sectors; @@ -306,9 +305,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, 
enum req_opf op, } while (sector < end_sector) { - bio = blk_next_bio(bio, 0, gfp_mask); - bio_set_dev(bio, bdev); - bio->bi_opf = op | REQ_SYNC; + bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask); bio->bi_iter.bi_sector = sector; sector += zone_sectors; diff --git a/block/blk.h b/block/blk.h index 800c5ae387a0..abb663a2a147 100644 --- a/block/blk.h +++ b/block/blk.h @@ -406,8 +406,6 @@ extern int blk_iolatency_init(struct request_queue *q); static inline int blk_iolatency_init(struct request_queue *q) { return 0; } #endif -struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp); - #ifdef CONFIG_BLK_DEV_ZONED void blk_queue_free_zone_bitmaps(struct request_queue *q); void blk_queue_clear_zone_settings(struct request_queue *q); diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 46bc30fe85d2..247de74247fa 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -412,10 +412,10 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) while (sector < get_capacity(bdev->bd_disk)) { if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) { - bio = blk_next_bio(bio, 0, GFP_KERNEL); - bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC; + bio = blk_next_bio(bio, bdev, 0, + zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC, + GFP_KERNEL); bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, bdev); /* This may take a while, so be nice to others */ cond_resched(); } diff --git a/include/linux/bio.h b/include/linux/bio.h index 117d7f248ac9..edeae54074ed 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -790,6 +790,7 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) bio->bi_opf |= REQ_NOWAIT; } -struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp); +struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, + unsigned int nr_pages, unsigned int opf, gfp_t gfp); #endif /* __LINUX_BIO_H */ -- cgit v1.2.3-71-gd317 From 609be1066731fea86436f5f91022f82e592ab456 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jan 2022 10:11:03 +0100 Subject: block: pass a block_device and opf to bio_alloc_bioset Pass the block_device and operation that we plan to use this bio for to bio_alloc_bioset to optimize the assignment. NULL/0 can be passed, both for the passthrough case on a raw request_queue and to temporarily avoid refactoring some nasty code. Also move the gfp_mask argument after the nr_vecs argument for a much more logical calling convention matching what most of the kernel does.
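To make the new calling convention concrete, a minimal sketch follows (alloc_write_bio_sketch() is hypothetical, assumed only for illustration; the bio_alloc_bioset() signature is the one introduced by this patch). The old three-step allocate/set-device/set-opf sequence collapses into a single call, with gfp_mask now trailing nr_vecs:

static struct bio *alloc_write_bio_sketch(struct block_device *bdev,
					  struct bio_set *bs)
{
	/* Old convention:
	 *	bio = bio_alloc_bioset(GFP_NOIO, 4, bs);
	 *	bio_set_dev(bio, bdev);
	 *	bio->bi_opf = REQ_OP_WRITE;
	 */
	return bio_alloc_bioset(bdev, 4, REQ_OP_WRITE, GFP_NOIO, bs);
}

Callers that genuinely have no device or operation yet, such as bio_clone_fast() in the hunks below, pass NULL/0 as described above.
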
Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20220124091107.642561-16-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 30 ++++++++++++++++++------------ block/bounce.c | 6 ++---- drivers/block/drbd/drbd_actlog.c | 5 ++--- drivers/block/drbd/drbd_bitmap.c | 7 +++---- drivers/md/bcache/request.c | 12 +++++------- drivers/md/dm-crypt.c | 5 ++--- drivers/md/dm-io.c | 5 ++--- drivers/md/dm-writecache.c | 7 ++++--- drivers/md/dm.c | 5 +++-- drivers/md/md.c | 16 ++++++++-------- drivers/md/raid1.c | 3 ++- drivers/md/raid10.c | 6 ++---- drivers/md/raid5-cache.c | 8 ++++---- drivers/md/raid5-ppl.c | 11 +++++------ drivers/target/target_core_iblock.c | 6 ++---- fs/btrfs/extent_io.c | 2 +- fs/f2fs/data.c | 7 +++---- fs/iomap/buffered-io.c | 6 +++--- include/linux/bio.h | 7 ++++--- 19 files changed, 75 insertions(+), 79 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index a0166f29a05c..9afc0c2aca6e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -417,8 +417,10 @@ static void punt_bios_to_rescuer(struct bio_set *bs) /** * bio_alloc_bioset - allocate a bio for I/O + * @bdev: block device to allocate the bio for (can be %NULL) + * @nr_vecs: number of bvecs to pre-allocate + * @opf: operation and flags for bio * @gfp_mask: the GFP_* mask given to the slab allocator - * @nr_iovecs: number of iovecs to pre-allocate * @bs: the bio_set to allocate from. * * Allocate a bio from the mempools in @bs. @@ -447,15 +449,16 @@ static void punt_bios_to_rescuer(struct bio_set *bs) * * Returns: Pointer to new bio on success, NULL on failure. */ -struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs, +struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, + unsigned int opf, gfp_t gfp_mask, struct bio_set *bs) { gfp_t saved_gfp = gfp_mask; struct bio *bio; void *p; - /* should not use nobvec bioset for nr_iovecs > 0 */ - if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0)) + /* should not use nobvec bioset for nr_vecs > 0 */ + if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) return NULL; /* @@ -492,26 +495,29 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs, return NULL; bio = p + bs->front_pad; - if (nr_iovecs > BIO_INLINE_VECS) { + if (nr_vecs > BIO_INLINE_VECS) { struct bio_vec *bvl = NULL; - bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask); + bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); if (!bvl && gfp_mask != saved_gfp) { punt_bios_to_rescuer(bs); gfp_mask = saved_gfp; - bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask); + bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); } if (unlikely(!bvl)) goto err_free; - bio_init(bio, bvl, nr_iovecs); - } else if (nr_iovecs) { + bio_init(bio, bvl, nr_vecs); + } else if (nr_vecs) { bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS); } else { bio_init(bio, NULL, 0); } bio->bi_pool = bs; + if (bdev) + bio_set_dev(bio, bdev); + bio->bi_opf = opf; return bio; err_free: @@ -767,7 +773,7 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) { struct bio *b; - b = bio_alloc_bioset(gfp_mask, 0, bs); + b = bio_alloc_bioset(NULL, 0, 0, gfp_mask, bs); if (!b) return NULL; @@ -1743,7 +1749,7 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, struct bio *bio; if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS) - return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs); + return bio_alloc_bioset(NULL, 
nr_vecs, 0, GFP_KERNEL, bs); cache = per_cpu_ptr(bs->cache, get_cpu()); if (cache->free_list) { @@ -1757,7 +1763,7 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, return bio; } put_cpu(); - bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs); + bio = bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs); bio_set_flag(bio, BIO_PERCPU_CACHE); return bio; } diff --git a/block/bounce.c b/block/bounce.c index 7af1a72835b9..330ddde25b46 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -165,12 +165,10 @@ static struct bio *bounce_clone_bio(struct bio *bio_src) * asking for trouble and would force extra work on * __bio_clone_fast() anyways. */ - bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), - &bounce_bio_set); - bio->bi_bdev = bio_src->bi_bdev; + bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src), + bio_src->bi_opf, GFP_NOIO, &bounce_bio_set); if (bio_flagged(bio_src, BIO_REMAPPED)) bio_set_flag(bio, BIO_REMAPPED); - bio->bi_opf = bio_src->bi_opf; bio->bi_ioprio = bio_src->bi_ioprio; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 72cf7603d51f..f5bcded3640d 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -138,15 +138,14 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, op_flags |= REQ_FUA | REQ_PREFLUSH; op_flags |= REQ_SYNC; - bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set); - bio_set_dev(bio, bdev->md_bdev); + bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO, + &drbd_md_io_bio_set); bio->bi_iter.bi_sector = sector; err = -EIO; if (bio_add_page(bio, device->md_io.page, size, 0) != size) goto out; bio->bi_private = device; bio->bi_end_io = drbd_md_endio; - bio_set_op_attrs(bio, op, op_flags); if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL) /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */ diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index c1f816f896a8..df25eecf80af 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -976,12 +976,13 @@ static void drbd_bm_endio(struct bio *bio) static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local) { - struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set); struct drbd_device *device = ctx->device; + unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; + struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, + GFP_NOIO, &drbd_md_io_bio_set); struct drbd_bitmap *b = device->bitmap; struct page *page; unsigned int len; - unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; sector_t on_disk_sector = device->ldev->md.md_offset + device->ldev->md.bm_offset; @@ -1006,14 +1007,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho bm_store_page_idx(page, page_nr); } else page = b->bm_pages[page_nr]; - bio_set_dev(bio, device->ldev->md_bdev); bio->bi_iter.bi_sector = on_disk_sector; /* bio_add_page of a single page to an empty bio will always succeed, * according to api. Do we want to assert that? */ bio_add_page(bio, page, len, 0); bio->bi_private = ctx; bio->bi_end_io = drbd_bm_endio; - bio_set_op_attrs(bio, op, 0); if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? 
DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { bio_io_error(bio); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index d15aae6c51c1..c4b7e434de8a 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -913,14 +913,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, /* btree_search_recurse()'s btree iterator is no good anymore */ ret = miss == bio ? MAP_DONE : -EINTR; - cache_bio = bio_alloc_bioset(GFP_NOWAIT, + cache_bio = bio_alloc_bioset(miss->bi_bdev, DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS), - &dc->disk.bio_split); + 0, GFP_NOWAIT, &dc->disk.bio_split); if (!cache_bio) goto out_submit; cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; - bio_copy_dev(cache_bio, miss); cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; cache_bio->bi_end_io = backing_request_endio; @@ -1025,16 +1024,15 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) */ struct bio *flush; - flush = bio_alloc_bioset(GFP_NOIO, 0, - &dc->disk.bio_split); + flush = bio_alloc_bioset(bio->bi_bdev, 0, + REQ_OP_WRITE | REQ_PREFLUSH, + GFP_NOIO, &dc->disk.bio_split); if (!flush) { s->iop.status = BLK_STS_RESOURCE; goto insert_data; } - bio_copy_dev(flush, bio); flush->bi_end_io = backing_request_endio; flush->bi_private = cl; - flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, flush, cl); } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 3c5ecd35d348..f7e4435b7439 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1672,11 +1672,10 @@ retry: if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) mutex_lock(&cc->bio_alloc_lock); - clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs); + clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf, + GFP_NOIO, &cc->bs); clone->bi_private = io; clone->bi_end_io = crypt_endio; - bio_set_dev(clone, cc->dev->bdev); - clone->bi_opf = io->base_bio->bi_opf; remaining_size = size; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2d3cda0acacb..23e038f8dc84 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -345,11 +345,10 @@ static void do_region(int op, int op_flags, unsigned region, (PAGE_SIZE >> SECTOR_SHIFT))); } - bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios); + bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags, + GFP_NOIO, &io->client->bios); bio->bi_iter.bi_sector = where->sector + (where->count - remaining); - bio_set_dev(bio, where->bdev); bio->bi_end_io = endio; - bio_set_op_attrs(bio, op, op_flags); store_io_and_region_in_bio(bio, io, region); if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) { diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 4f31591d2d25..5630b470ba42 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -1821,11 +1821,11 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba max_pages = e->wc_list_contiguous; - bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set); + bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE, + GFP_NOIO, &wc->bio_set); wb = container_of(bio, struct writeback_struct, bio); wb->wc = wc; bio->bi_end_io = writecache_writeback_endio; - bio_set_dev(bio, wc->dev->bdev); bio->bi_iter.bi_sector = read_original_sector(wc, e); if (max_pages <= WB_LIST_INLINE || unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *), @@ -1852,7 +1852,8 @@ static void 
__writecache_writeback_pmem(struct dm_writecache *wc, struct writeba wb->wc_list[wb->wc_list_n++] = f; e = f; } - bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA); + if (WC_MODE_FUA(wc)) + bio->bi_opf |= REQ_FUA; if (writecache_has_error(wc)) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e431f72c10bf..069e29013b6b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -519,7 +519,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); + clone = bio_alloc_bioset(NULL, 0, 0, GFP_NOIO, &md->io_bs); tio = container_of(clone, struct dm_target_io, clone); tio->inside_dm_io = true; @@ -552,7 +552,8 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *t /* the dm_target_io embedded in ci->io is available */ tio = &ci->io->tio; } else { - struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); + struct bio *clone = bio_alloc_bioset(NULL, 0, 0, gfp_mask, + &ci->io->md->bs); if (!clone) return NULL; diff --git a/drivers/md/md.c b/drivers/md/md.c index 5881d05a76eb..40fc1f7e65c5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -562,11 +562,11 @@ static void submit_flushes(struct work_struct *ws) atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set); + bi = bio_alloc_bioset(rdev->bdev, 0, + REQ_OP_WRITE | REQ_PREFLUSH, + GFP_NOIO, &mddev->bio_set); bi->bi_end_io = md_end_flush; bi->bi_private = rdev; - bio_set_dev(bi, rdev->bdev); - bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; atomic_inc(&mddev->flush_pending); submit_bio(bi); rcu_read_lock(); @@ -955,7 +955,6 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, * If an error occurred, call md_error */ struct bio *bio; - int ff = 0; if (!page) return; @@ -963,11 +962,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, if (test_bit(Faulty, &rdev->flags)) return; - bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); + bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev, + 1, + REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA, + GFP_NOIO, &mddev->sync_set); atomic_inc(&rdev->nr_pending); - bio_set_dev(bio, rdev->meta_bdev ? 
rdev->meta_bdev : rdev->bdev); bio->bi_iter.bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; @@ -976,8 +977,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && test_bit(FailFast, &rdev->flags) && !test_bit(LastDev, &rdev->flags)) - ff = MD_FAILFAST; - bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff; + bio->bi_opf |= MD_FAILFAST; atomic_inc(&mddev->pending_writes); submit_bio(bio); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e2d8acb1e988..43276f8fdc81 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1126,7 +1126,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, int i = 0; struct bio *behind_bio = NULL; - behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set); + behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO, + &r1_bio->mddev->bio_set); if (!behind_bio) return; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 2b969f70a31f..cb7c58050708 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4892,14 +4892,12 @@ read_more: return sectors_done; } - read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set); - - bio_set_dev(read_bio, rdev->bdev); + read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ, + GFP_KERNEL, &mddev->bio_set); read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr + rdev->data_offset); read_bio->bi_private = r10_bio; read_bio->bi_end_io = end_reshape_read; - bio_set_op_attrs(read_bio, REQ_OP_READ, 0); r10_bio->master_bio = read_bio; r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 0b5dcaabbc15..66313adf9987 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -735,10 +735,9 @@ static void r5l_submit_current_io(struct r5l_log *log) static struct bio *r5l_bio_alloc(struct r5l_log *log) { - struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs); + struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS, + REQ_OP_WRITE, GFP_NOIO, &log->bs); - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - bio_set_dev(bio, log->rdev->bdev); bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; return bio; @@ -1634,7 +1633,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, { struct page *page; - ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs); + ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL, + &log->bs); if (!ctx->ra_bio) return -ENOMEM; diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 4ab417915d7f..054d3bb252d4 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -496,11 +496,10 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) { struct bio *prev = bio; - bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, + bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS, + prev->bi_opf, GFP_NOIO, &ppl_conf->bs); - bio->bi_opf = prev->bi_opf; bio->bi_write_hint = prev->bi_write_hint; - bio_copy_dev(bio, prev); bio->bi_iter.bi_sector = bio_end_sector(prev); bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); @@ -637,10 +636,10 @@ static void ppl_do_flush(struct ppl_io_unit *io) struct bio *bio; char b[BDEVNAME_SIZE]; - bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs); - bio_set_dev(bio, bdev); + bio = bio_alloc_bioset(bdev, 0, GFP_NOIO, + REQ_OP_WRITE | REQ_PREFLUSH, + &ppl_conf->flush_bs); 
bio->bi_private = io; - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; bio->bi_end_io = ppl_flush_endio; pr_debug("%s: dev: %s\n", __func__, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 6045678365a5..3c92ba374819 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -352,18 +352,16 @@ static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, * Only allocate as many vector entries as the bio code allows us to, * we'll loop later on until we have handled the whole request. */ - bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num), - &ib_dev->ibd_bio_set); + bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf, + GFP_NOIO, &ib_dev->ibd_bio_set); if (!bio) { pr_err("Unable to allocate memory for bio\n"); return NULL; } - bio_set_dev(bio, ib_dev->ibd_bd); bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; bio->bi_iter.bi_sector = lba; - bio->bi_opf = opf; return bio; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 409bad3928db..421d921a0571 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3143,7 +3143,7 @@ struct bio *btrfs_bio_alloc(unsigned int nr_iovecs) struct bio *bio; ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS); - bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset); + bio = bio_alloc_bioset(NULL, nr_iovecs, 0, GFP_NOFS, &btrfs_bioset); btrfs_bio_init(btrfs_bio(bio)); return bio; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 8c417864c66a..e71dde8de0db 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -394,7 +394,7 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) struct f2fs_sb_info *sbi = fio->sbi; struct bio *bio; - bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset); + bio = bio_alloc_bioset(NULL, npages, 0, GFP_NOIO, &f2fs_bioset); f2fs_target_device(sbi, fio->new_blkaddr, bio); if (is_read_io(fio->op)) { @@ -985,8 +985,8 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, struct bio_post_read_ctx *ctx = NULL; unsigned int post_read_steps = 0; - bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL, - bio_max_segs(nr_pages), &f2fs_bioset); + bio = bio_alloc_bioset(NULL, bio_max_segs(nr_pages), REQ_OP_READ, + for_write ? 
GFP_NOIO : GFP_KERNEL, &f2fs_bioset); if (!bio) return ERR_PTR(-ENOMEM); @@ -994,7 +994,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, f2fs_target_device(sbi, blkaddr, bio); bio->bi_end_io = f2fs_read_end_io; - bio_set_op_attrs(bio, REQ_OP_READ, op_flag); if (fscrypt_inode_uses_fs_layer_crypto(inode)) post_read_steps |= STEP_DECRYPT; diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index c938bbad075e..340d373cb1bf 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1196,10 +1196,10 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend; struct bio *bio; - bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset); - bio_set_dev(bio, wpc->iomap.bdev); + bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, + REQ_OP_WRITE | wbc_to_write_flags(wbc), + GFP_NOFS, &iomap_ioend_bioset); bio->bi_iter.bi_sector = sector; - bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); bio->bi_write_hint = inode->i_write_hint; wbc_init_bio(wbc, bio); diff --git a/include/linux/bio.h b/include/linux/bio.h index edeae54074ed..2f63ae9a71e1 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -405,8 +405,9 @@ extern void bioset_exit(struct bio_set *); extern int biovec_init_pool(mempool_t *pool, int pool_entries); extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); -struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs, - struct bio_set *bs); +struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, + unsigned int opf, gfp_t gfp_mask, + struct bio_set *bs); struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, struct bio_set *bs); struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); @@ -419,7 +420,7 @@ extern struct bio_set fs_bio_set; static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs) { - return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); + return bio_alloc_bioset(NULL, nr_iovecs, 0, gfp_mask, &fs_bio_set); } void submit_bio(struct bio *bio); -- cgit v1.2.3-71-gd317 From b77c88c2100ce6a5ec8126c13599b5a7f6663e32 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jan 2022 10:11:04 +0100 Subject: block: pass a block_device and opf to bio_alloc_kiocb Pass the block_device and operation that we plan to use this bio for to bio_alloc_kiocb to optimize the assignment. Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20220124091107.642561-17-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 12 ++++++++---- block/fops.c | 17 ++++++++--------- include/linux/bio.h | 4 ++-- 3 files changed, 18 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index 9afc0c2aca6e..6c3efb0fd12b 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1731,7 +1731,9 @@ EXPORT_SYMBOL(bioset_init_from_src); /** * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb * @kiocb: kiocb describing the IO + * @bdev: block device to allocate the bio for (can be %NULL) * @nr_vecs: number of iovecs to pre-allocate + * @opf: operation and flags for bio * @bs: bio_set to allocate from * * Description: @@ -1742,14 +1744,14 @@ EXPORT_SYMBOL(bioset_init_from_src); * MUST be done from process context, not hard/soft IRQ. 
* */ -struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, - struct bio_set *bs) +struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev, + unsigned short nr_vecs, unsigned int opf, struct bio_set *bs) { struct bio_alloc_cache *cache; struct bio *bio; if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS) - return bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs); + return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs); cache = per_cpu_ptr(bs->cache, get_cpu()); if (cache->free_list) { @@ -1758,12 +1760,14 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, cache->nr--; put_cpu(); bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs); + bio_set_dev(bio, bdev); + bio->bi_opf = opf; bio->bi_pool = bs; bio_set_flag(bio, BIO_PERCPU_CACHE); return bio; } put_cpu(); - bio = bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs); + bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs); bio_set_flag(bio, BIO_PERCPU_CACHE); return bio; } diff --git a/block/fops.c b/block/fops.c index 26bf15c770d2..3a62b8b91275 100644 --- a/block/fops.c +++ b/block/fops.c @@ -190,6 +190,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, struct blkdev_dio *dio; struct bio *bio; bool is_read = (iov_iter_rw(iter) == READ), is_sync; + unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb); loff_t pos = iocb->ki_pos; int ret = 0; @@ -197,7 +198,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, (bdev_logical_block_size(bdev) - 1)) return -EINVAL; - bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool); + bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool); dio = container_of(bio, struct blkdev_dio, bio); atomic_set(&dio->ref, 1); @@ -223,7 +224,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, blk_start_plug(&plug); for (;;) { - bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; bio->bi_write_hint = iocb->ki_hint; bio->bi_private = dio; @@ -238,11 +238,9 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, } if (is_read) { - bio->bi_opf = REQ_OP_READ; if (dio->flags & DIO_SHOULD_DIRTY) bio_set_pages_dirty(bio); } else { - bio->bi_opf = dio_bio_write_op(iocb); task_io_account_write(bio->bi_iter.bi_size); } if (iocb->ki_flags & IOCB_NOWAIT) @@ -259,6 +257,8 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, atomic_inc(&dio->ref); submit_bio(bio); bio = bio_alloc(GFP_KERNEL, nr_pages); + bio_set_dev(bio, bdev); + bio->bi_opf = opf; } blk_finish_plug(&plug); @@ -311,6 +311,8 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb, unsigned int nr_pages) { struct block_device *bdev = iocb->ki_filp->private_data; + bool is_read = iov_iter_rw(iter) == READ; + unsigned int opf = is_read ? 
REQ_OP_READ : dio_bio_write_op(iocb); struct blkdev_dio *dio; struct bio *bio; loff_t pos = iocb->ki_pos; @@ -320,11 +322,10 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb, (bdev_logical_block_size(bdev) - 1)) return -EINVAL; - bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool); + bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool); dio = container_of(bio, struct blkdev_dio, bio); dio->flags = 0; dio->iocb = iocb; - bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; bio->bi_write_hint = iocb->ki_hint; bio->bi_end_io = blkdev_bio_end_io_async; @@ -347,14 +348,12 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb, } dio->size = bio->bi_iter.bi_size; - if (iov_iter_rw(iter) == READ) { - bio->bi_opf = REQ_OP_READ; + if (is_read) { if (iter_is_iovec(iter)) { dio->flags |= DIO_SHOULD_DIRTY; bio_set_pages_dirty(bio); } } else { - bio->bi_opf = dio_bio_write_op(iocb); task_io_account_write(bio->bi_iter.bi_size); } diff --git a/include/linux/bio.h b/include/linux/bio.h index 2f63ae9a71e1..5c5ada2ebb27 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -408,8 +408,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask, struct bio_set *bs); -struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, - struct bio_set *bs); +struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev, + unsigned short nr_vecs, unsigned int opf, struct bio_set *bs); struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); extern void bio_put(struct bio *); -- cgit v1.2.3-71-gd317 From 07888c665b405b1cd3577ddebfeb74f4717a84c4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jan 2022 10:11:05 +0100 Subject: block: pass a block_device and opf to bio_alloc Pass the block_device and operation that we plan to use this bio for to bio_alloc to optimize the assignment. NULL/0 can be passed, both for the passthrough case on a raw request_queue and to temporarily avoid refactoring some nasty code. Also move the gfp_mask argument after the nr_vecs argument for a much more logical calling convention matching what most of the kernel does. 
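For illustration (a hypothetical read-path caller, not a hunk from this patch), the common open-coded pattern collapses into the allocation call:

	/* Before */
	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;

	/* After: same information in one call, with gfp_mask moved last. */
	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);

Callers that do not yet know the device or operation keep working by passing NULL and 0, as several hunks below do.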
Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20220124091107.642561-18-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 5 +---- block/fops.c | 4 +--- drivers/block/drbd/drbd_receiver.c | 10 ++++------ drivers/block/rnbd/rnbd-srv.c | 5 ++--- drivers/block/xen-blkback/blkback.c | 11 +++++------ drivers/block/zram/zram_drv.c | 11 ++++------- drivers/md/dm-log-writes.c | 21 ++++++++------------- drivers/md/dm-thin.c | 9 ++++----- drivers/md/dm-zoned-metadata.c | 15 ++++++--------- drivers/nvdimm/nd_virtio.c | 6 +++--- drivers/nvme/target/io-cmd-bdev.c | 12 ++++++------ drivers/nvme/target/passthru.c | 5 +++-- drivers/nvme/target/zns.c | 6 +++--- drivers/scsi/ufs/ufshpb.c | 4 ++-- drivers/target/target_core_iblock.c | 5 ++--- fs/btrfs/disk-io.c | 6 +++--- fs/buffer.c | 14 ++++++-------- fs/crypto/bio.c | 13 +++++++------ fs/direct-io.c | 5 +---- fs/erofs/zdata.c | 5 ++--- fs/ext4/page-io.c | 3 +-- fs/ext4/readpage.c | 8 ++++---- fs/gfs2/lops.c | 8 +++----- fs/gfs2/meta_io.c | 4 +--- fs/gfs2/ops_fstype.c | 4 +--- fs/hfsplus/wrapper.c | 4 +--- fs/iomap/buffered-io.c | 16 ++++++++-------- fs/iomap/direct-io.c | 8 ++------ fs/jfs/jfs_logmgr.c | 11 ++--------- fs/jfs/jfs_metapage.c | 9 +++------ fs/mpage.c | 7 +++---- fs/nfs/blocklayout/blocklayout.c | 4 +--- fs/nilfs2/segbuf.c | 4 ++-- fs/ntfs3/fsntfs.c | 8 ++------ fs/ocfs2/cluster/heartbeat.c | 4 +--- fs/squashfs/block.c | 11 ++++++----- fs/xfs/xfs_bio_io.c | 10 ++++------ fs/xfs/xfs_buf.c | 4 +--- fs/zonefs/super.c | 5 ++--- include/linux/bio.h | 5 +++-- kernel/power/swap.c | 5 ++--- mm/page_io.c | 10 ++++------ 42 files changed, 130 insertions(+), 194 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index 6c3efb0fd12b..b73c9babd583 100644 --- a/block/bio.c +++ b/block/bio.c @@ -347,10 +347,7 @@ EXPORT_SYMBOL(bio_chain); struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, unsigned int nr_pages, unsigned int opf, gfp_t gfp) { - struct bio *new = bio_alloc(gfp, nr_pages); - - bio_set_dev(new, bdev); - new->bi_opf = opf; + struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp); if (bio) { bio_chain(bio, new); diff --git a/block/fops.c b/block/fops.c index 3a62b8b91275..c68359684773 100644 --- a/block/fops.c +++ b/block/fops.c @@ -256,9 +256,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, } atomic_inc(&dio->ref); submit_bio(bio); - bio = bio_alloc(GFP_KERNEL, nr_pages); - bio_set_dev(bio, bdev); - bio->bi_opf = opf; + bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL); } blk_finish_plug(&plug); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index fb59b263deee..04e3ec12d8b4 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1279,7 +1279,8 @@ static void one_flush_endio(struct bio *bio) static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx) { - struct bio *bio = bio_alloc(GFP_NOIO, 0); + struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0, + REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO); struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO); if (!octx) { @@ -1297,10 +1298,8 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont octx->device = device; octx->ctx = ctx; - bio_set_dev(bio, device->ldev->backing_bdev); bio->bi_private = octx; bio->bi_end_io = one_flush_endio; - bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH; device->flush_jif = jiffies; 
set_bit(FLUSH_PENDING, &device->flags); @@ -1685,11 +1684,10 @@ int drbd_submit_peer_request(struct drbd_device *device, * generated bio, but a bio allocated on behalf of the peer. */ next_bio: - bio = bio_alloc(GFP_NOIO, nr_pages); + bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags, + GFP_NOIO); /* > peer_req->i.sector, unless this is the first bio */ bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, device->ldev->backing_bdev); - bio_set_op_attrs(bio, op, op_flags); bio->bi_private = peer_req; bio->bi_end_io = drbd_peer_request_endio; diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index ff9b38997607..132e950685d5 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -149,7 +149,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess, priv->sess_dev = sess_dev; priv->id = id; - bio = bio_alloc(GFP_KERNEL, 1); + bio = bio_alloc(sess_dev->rnbd_dev->bdev, 1, + rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL); if (bio_add_page(bio, virt_to_page(data), datalen, offset_in_page(data)) != datalen) { rnbd_srv_err(sess_dev, "Failed to map data to bio\n"); @@ -159,13 +160,11 @@ static int process_rdma(struct rnbd_srv_session *srv_sess, bio->bi_end_io = rnbd_dev_bi_end_io; bio->bi_private = priv; - bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw)); bio->bi_iter.bi_sector = le64_to_cpu(msg->sector); bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size); prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR || usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio); bio_set_prio(bio, prio); - bio_set_dev(bio, sess_dev->rnbd_dev->bdev); submit_bio(bio); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 6bb2ad769206..d1e26461a64e 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1326,13 +1326,13 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, pages[i]->page, seg[i].nsec << 9, seg[i].offset) == 0)) { - bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i)); + bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i), + operation | operation_flags, + GFP_KERNEL); biolist[nbio++] = bio; - bio_set_dev(bio, preq.bdev); bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio->bi_iter.bi_sector = preq.sector_number; - bio_set_op_attrs(bio, operation, operation_flags); } preq.sector_number += seg[i].nsec; @@ -1342,12 +1342,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, if (!bio) { BUG_ON(operation_flags != REQ_PREFLUSH); - bio = bio_alloc(GFP_KERNEL, 0); + bio = bio_alloc(preq.bdev, 0, operation | operation_flags, + GFP_KERNEL); biolist[nbio++] = bio; - bio_set_dev(bio, preq.bdev); bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; - bio_set_op_attrs(bio, operation, operation_flags); } atomic_set(&pending_req->pendcnt, nbio); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 342dbcb3f220..f3fe0ea8aa80 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -616,24 +616,21 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, { struct bio *bio; - bio = bio_alloc(GFP_NOIO, 1); + bio = bio_alloc(zram->bdev, 1, parent ? 
parent->bi_opf : REQ_OP_READ, + GFP_NOIO); if (!bio) return -ENOMEM; bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); - bio_set_dev(bio, zram->bdev); if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { bio_put(bio); return -EIO; } - if (!parent) { - bio->bi_opf = REQ_OP_READ; + if (!parent) bio->bi_end_io = zram_page_end_io; - } else { - bio->bi_opf = parent->bi_opf; + else bio_chain(bio, parent); - } submit_bio(bio); return 1; diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 25f5e8d2d417..c9d036d6bb2e 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -217,14 +217,12 @@ static int write_metadata(struct log_writes_c *lc, void *entry, void *ptr; size_t ret; - bio = bio_alloc(GFP_KERNEL, 1); + bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL); bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ? log_end_super : log_end_io; bio->bi_private = lc; - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); page = alloc_page(GFP_KERNEL); if (!page) { @@ -271,13 +269,12 @@ static int write_inline_data(struct log_writes_c *lc, void *entry, atomic_inc(&lc->io_blocks); - bio = bio_alloc(GFP_KERNEL, bio_pages); + bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE, + GFP_KERNEL); bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); for (i = 0; i < bio_pages; i++) { pg_datalen = min_t(int, datalen, PAGE_SIZE); @@ -353,13 +350,12 @@ static int log_one_block(struct log_writes_c *lc, goto out; atomic_inc(&lc->io_blocks); - bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt)); + bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt), + REQ_OP_WRITE, GFP_KERNEL); bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); for (i = 0; i < block->vec_cnt; i++) { /* @@ -371,14 +367,13 @@ static int log_one_block(struct log_writes_c *lc, if (ret != block->vecs[i].bv_len) { atomic_inc(&lc->io_blocks); submit_bio(bio); - bio = bio_alloc(GFP_KERNEL, - bio_max_segs(block->vec_cnt - i)); + bio = bio_alloc(lc->logdev->bdev, + bio_max_segs(block->vec_cnt - i), + REQ_OP_WRITE, GFP_KERNEL); bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, lc->logdev->bdev); bio->bi_end_io = log_end_io; bio->bi_private = lc; - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); ret = bio_add_page(bio, block->vecs[i].bv_page, block->vecs[i].bv_len, 0); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 411a3f56ed90..f4234d615aa1 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1177,13 +1177,12 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) return; } - discard_parent = bio_alloc(GFP_NOIO, 1); + discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO); discard_parent->bi_end_io = passdown_endio; discard_parent->bi_private = m; - - if (m->maybe_shared) - passdown_double_checking_shared_status(m, discard_parent); - else { + if (m->maybe_shared) + passdown_double_checking_shared_status(m, discard_parent); + else { struct discard_op op; begin_discard(&op, tc, discard_parent); diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 5718b83cc718..e5f1eb27ce2e 100644 --- a/drivers/md/dm-zoned-metadata.c 
+++ b/drivers/md/dm-zoned-metadata.c @@ -550,7 +550,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, if (!mblk) return ERR_PTR(-ENOMEM); - bio = bio_alloc(GFP_NOIO, 1); + bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO, + GFP_NOIO); spin_lock(&zmd->mblk_lock); @@ -574,10 +575,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, /* Submit read BIO */ bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_set_dev(bio, dev->bdev); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; - bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO); bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); @@ -721,15 +720,14 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, if (dmz_bdev_is_dying(dev)) return -EIO; - bio = bio_alloc(GFP_NOIO, 1); + bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO, + GFP_NOIO); set_bit(DMZ_META_WRITING, &mblk->state); bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_set_dev(bio, dev->bdev); bio->bi_private = mblk; bio->bi_end_io = dmz_mblock_bio_end_io; - bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); @@ -751,10 +749,9 @@ static int dmz_rdwr_block(struct dmz_dev *dev, int op, if (dmz_bdev_is_dying(dev)) return -EIO; - bio = bio_alloc(GFP_NOIO, 1); + bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO, + GFP_NOIO); bio->bi_iter.bi_sector = dmz_blk2sect(block); - bio_set_dev(bio, dev->bdev); - bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO); bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0); ret = submit_bio_wait(bio); bio_put(bio); diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c index 10351d5b49fa..c6a648fd8744 100644 --- a/drivers/nvdimm/nd_virtio.c +++ b/drivers/nvdimm/nd_virtio.c @@ -105,12 +105,12 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio) * parent bio. Otherwise directly call nd_region flush. 
*/ if (bio && bio->bi_iter.bi_sector != -1) { - struct bio *child = bio_alloc(GFP_ATOMIC, 0); + struct bio *child = bio_alloc(bio->bi_bdev, 0, REQ_PREFLUSH, + GFP_ATOMIC); if (!child) return -ENOMEM; - bio_copy_dev(child, bio); - child->bi_opf = REQ_PREFLUSH; + bio_clone_blkg_association(child, bio); child->bi_iter.bi_sector = -1; bio_chain(child, bio); submit_bio(child); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 70ca9dfc1771..e092af3abc71 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -268,14 +268,15 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) if (nvmet_use_inline_bvec(req)) { bio = &req->b.inline_bio; bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); + bio_set_dev(bio, req->ns->bdev); + bio->bi_opf = op; } else { - bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt)); + bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op, + GFP_KERNEL); } - bio_set_dev(bio, req->ns->bdev); bio->bi_iter.bi_sector = sector; bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; - bio->bi_opf = op; blk_start_plug(&plug); if (req->metadata_len) @@ -296,10 +297,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) } } - bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt)); - bio_set_dev(bio, req->ns->bdev); + bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), + op, GFP_KERNEL); bio->bi_iter.bi_sector = sector; - bio->bi_opf = op; bio_chain(bio, prev); submit_bio(prev); diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 9e5b89ae29df..38f72968c3fd 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -207,11 +207,12 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) if (nvmet_use_inline_bvec(req)) { bio = &req->p.inline_bio; bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); + bio->bi_opf = req_op(rq); } else { - bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt)); + bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq), + GFP_KERNEL); bio->bi_end_io = bio_put; } - bio->bi_opf = req_op(rq); for_each_sg(req->sg, sg, req->sg_cnt, i) { if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length, diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 247de74247fa..62c53e8f26d3 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -522,6 +522,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio) void nvmet_bdev_execute_zone_append(struct nvmet_req *req) { sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); + const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; u16 status = NVME_SC_SUCCESS; unsigned int total_len = 0; struct scatterlist *sg; @@ -552,13 +553,12 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) if (nvmet_use_inline_bvec(req)) { bio = &req->z.inline_bio; bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); + bio->bi_opf = op; } else { - bio = bio_alloc(GFP_KERNEL, req->sg_cnt); + bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL); } - bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; bio->bi_end_io = nvmet_bdev_zone_append_bio_done; - bio_set_dev(bio, req->ns->bdev); bio->bi_iter.bi_sector = sect; bio->bi_private = req; if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 2d36a0715fca..8970068314ef 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -494,7 +494,7 @@ 
static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, if (!map_req) return NULL; - bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn); + bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL); if (!bio) { ufshpb_put_req(hpb, map_req); return NULL; @@ -2050,7 +2050,7 @@ static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb) INIT_LIST_HEAD(&pre_req->list_req); pre_req->req = NULL; - pre_req->bio = bio_alloc(GFP_KERNEL, 1); + pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL); if (!pre_req->bio) goto release_mem; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 3c92ba374819..87ede165ddba 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -415,10 +415,9 @@ iblock_execute_sync_cache(struct se_cmd *cmd) if (immed) target_complete_cmd(cmd, SAM_STAT_GOOD); - bio = bio_alloc(GFP_KERNEL, 0); + bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH, + GFP_KERNEL); bio->bi_end_io = iblock_end_io_flush; - bio_set_dev(bio, ib_dev->ibd_bd); - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; if (!immed) bio->bi_private = cmd; submit_bio(bio); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 87a5addbedf6..f45aa506f9a6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4029,8 +4029,9 @@ static int write_dev_supers(struct btrfs_device *device, * to do I/O, so we don't lose the ability to do integrity * checking. */ - bio = bio_alloc(GFP_NOFS, 1); - bio_set_dev(bio, device->bdev); + bio = bio_alloc(device->bdev, 1, + REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, + GFP_NOFS); bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT; bio->bi_private = device; bio->bi_end_io = btrfs_end_super_write; @@ -4042,7 +4043,6 @@ static int write_dev_supers(struct btrfs_device *device, * go down lazy and there's a short window where the on-disk * copies might still contain the older version. */ - bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO; if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER)) bio->bi_opf |= REQ_FUA; diff --git a/fs/buffer.c b/fs/buffer.c index 8e112b6bd371..a17c386a142c 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3024,12 +3024,16 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) clear_buffer_write_io_error(bh); - bio = bio_alloc(GFP_NOIO, 1); + if (buffer_meta(bh)) + op_flags |= REQ_META; + if (buffer_prio(bh)) + op_flags |= REQ_PRIO; + + bio = bio_alloc(bh->b_bdev, 1, op | op_flags, GFP_NOIO); fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); - bio_set_dev(bio, bh->b_bdev); bio->bi_write_hint = write_hint; bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); @@ -3038,12 +3042,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, bio->bi_end_io = end_bio_bh_io_sync; bio->bi_private = bh; - if (buffer_meta(bh)) - op_flags |= REQ_META; - if (buffer_prio(bh)) - op_flags |= REQ_PRIO; - bio_set_op_attrs(bio, op, op_flags); - /* Take care of bh's that straddle the end of the device */ guard_bio_eod(bio); diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index bfc2a5b74ed3..755e985a42e0 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -54,7 +54,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode, int num_pages = 0; /* This always succeeds since __GFP_DIRECT_RECLAIM is set. 
*/ - bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS); + bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE, + GFP_NOFS); while (len) { unsigned int blocks_this_page = min(len, blocks_per_page); @@ -62,10 +63,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode, if (num_pages == 0) { fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS); - bio_set_dev(bio, inode->i_sb->s_bdev); bio->bi_iter.bi_sector = pblk << (blockbits - SECTOR_SHIFT); - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); } ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0); if (WARN_ON(ret != bytes_this_page)) { @@ -82,6 +81,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode, if (err) goto out; bio_reset(bio); + bio_set_dev(bio, inode->i_sb->s_bdev); + bio->bi_opf = REQ_OP_WRITE; num_pages = 0; } } @@ -150,12 +151,10 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, return -EINVAL; /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */ - bio = bio_alloc(GFP_NOFS, nr_pages); + bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS); do { - bio_set_dev(bio, inode->i_sb->s_bdev); bio->bi_iter.bi_sector = pblk << (blockbits - 9); - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); i = 0; offset = 0; @@ -183,6 +182,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, if (err) goto out; bio_reset(bio); + bio_set_dev(bio, inode->i_sb->s_bdev); + bio->bi_opf = REQ_OP_WRITE; } while (len != 0); err = 0; out: diff --git a/fs/direct-io.c b/fs/direct-io.c index 654443558047..38bca4980a1c 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -396,11 +396,8 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, * bio_alloc() is guaranteed to return a bio when allowed to sleep and * we request a valid number of vectors. */ - bio = bio_alloc(GFP_KERNEL, nr_vecs); - - bio_set_dev(bio, bdev); + bio = bio_alloc(bdev, nr_vecs, dio->op | dio->op_flags, GFP_KERNEL); bio->bi_iter.bi_sector = first_sector; - bio_set_op_attrs(bio, dio->op, dio->op_flags); if (dio->is_async) bio->bi_end_io = dio_bio_end_aio; else diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 498b7666efe8..db7de2dbac73 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1371,15 +1371,14 @@ submit_bio_retry: } if (!bio) { - bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS); + bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, + REQ_OP_READ, GFP_NOIO); bio->bi_end_io = z_erofs_decompressqueue_endio; - bio_set_dev(bio, mdev.m_bdev); last_bdev = mdev.m_bdev; bio->bi_iter.bi_sector = (sector_t)cur << LOG_SECTORS_PER_BLOCK; bio->bi_private = bi_private; - bio->bi_opf = REQ_OP_READ; if (f->readahead) bio->bi_opf |= REQ_RAHEAD; ++nr_bios; diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 1d370364230e..125398226873 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -398,10 +398,9 @@ static void io_submit_init_bio(struct ext4_io_submit *io, * bio_alloc will _always_ be able to allocate a bio if * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset(). 
*/ - bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS); + bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, 0, GFP_NOIO); fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); - bio_set_dev(bio, bh->b_bdev); bio->bi_end_io = ext4_end_bio; bio->bi_private = ext4_get_io_end(io->io_end); io->io_bio = bio; diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 4cd62f1d848c..1aa26d6634fc 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -365,15 +365,15 @@ int ext4_mpage_readpages(struct inode *inode, * bio_alloc will _always_ be able to allocate a bio if * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset(). */ - bio = bio_alloc(GFP_KERNEL, bio_max_segs(nr_pages)); + bio = bio_alloc(bdev, bio_max_segs(nr_pages), + REQ_OP_READ, GFP_KERNEL); fscrypt_set_bio_crypt_ctx(bio, inode, next_block, GFP_KERNEL); ext4_set_bio_post_read_ctx(bio, inode, page->index); - bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); bio->bi_end_io = mpage_end_io; - bio_set_op_attrs(bio, REQ_OP_READ, - rac ? REQ_RAHEAD : 0); + if (rac) + bio->bi_opf |= REQ_RAHEAD; } length = first_hole << blkbits; diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index ca0bb3a73912..4ae1eefae616 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -265,10 +265,9 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, bio_end_io_t *end_io) { struct super_block *sb = sdp->sd_vfs; - struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS); + struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO); bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift; - bio_set_dev(bio, sb->s_bdev); bio->bi_end_io = end_io; bio->bi_private = sdp; @@ -489,10 +488,9 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs) { struct bio *new; - new = bio_alloc(GFP_NOIO, nr_iovecs); - bio_copy_dev(new, prev); + new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO); + bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); - new->bi_opf = prev->bi_opf; new->bi_write_hint = prev->bi_write_hint; bio_chain(new, prev); submit_bio(prev); diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 72d30a682ece..a580b90b7522 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -222,9 +222,8 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[], struct buffer_head *bh = *bhs; struct bio *bio; - bio = bio_alloc(GFP_NOIO, num); + bio = bio_alloc(bh->b_bdev, num, op | op_flags, GFP_NOIO); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); - bio_set_dev(bio, bh->b_bdev); while (num > 0) { bh = *bhs; if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) { @@ -235,7 +234,6 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[], num--; } bio->bi_end_io = gfs2_meta_read_endio; - bio_set_op_attrs(bio, op, op_flags); submit_bio(bio); } } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 7f8410d8fdc1..c9b423c874a3 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -251,14 +251,12 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) ClearPageDirty(page); lock_page(page); - bio = bio_alloc(GFP_NOFS, 1); + bio = bio_alloc(sb->s_bdev, 1, REQ_OP_READ | REQ_META, GFP_NOFS); bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); - bio_set_dev(bio, sb->s_bdev); bio_add_page(bio, page, PAGE_SIZE, 0); bio->bi_end_io = end_bio_io_page; bio->bi_private = page; - bio_set_op_attrs(bio, REQ_OP_READ, REQ_META); 
submit_bio(bio); wait_on_page_locked(page); bio_put(bio); diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 4688cc7b3692..0b8ad6586df5 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -63,10 +63,8 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, offset = start & (io_size - 1); sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1); - bio = bio_alloc(GFP_NOIO, 1); + bio = bio_alloc(sb->s_bdev, 1, op | op_flags, GFP_NOIO); bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, sb->s_bdev); - bio_set_op_attrs(bio, op, op_flags); if (op != WRITE && data) *data = (u8 *)buf + offset; diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 340d373cb1bf..70f3657a6ec0 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -290,19 +290,20 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, if (ctx->rac) /* same as readahead_gfp_mask */ gfp |= __GFP_NORETRY | __GFP_NOWARN; - ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs)); + ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), + REQ_OP_READ, gfp); /* * If the bio_alloc fails, try it again for a single page to * avoid having to deal with partial page reads. This emulates * what do_mpage_readpage does. */ - if (!ctx->bio) - ctx->bio = bio_alloc(orig_gfp, 1); - ctx->bio->bi_opf = REQ_OP_READ; + if (!ctx->bio) { + ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, + orig_gfp); + } if (ctx->rac) ctx->bio->bi_opf |= REQ_RAHEAD; ctx->bio->bi_iter.bi_sector = sector; - bio_set_dev(ctx->bio, iomap->bdev); ctx->bio->bi_end_io = iomap_read_end_io; bio_add_folio(ctx->bio, folio, plen, poff); } @@ -1226,10 +1227,9 @@ iomap_chain_bio(struct bio *prev) { struct bio *new; - new = bio_alloc(GFP_NOFS, BIO_MAX_VECS); - bio_copy_dev(new, prev);/* also copies over blkcg information */ + new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS); + bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); - new->bi_opf = prev->bi_opf; new->bi_write_hint = prev->bi_write_hint; bio_chain(prev, new); diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 03ea367df19a..e2ba13645ef2 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -183,15 +183,13 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio, int flags = REQ_SYNC | REQ_IDLE; struct bio *bio; - bio = bio_alloc(GFP_KERNEL, 1); - bio_set_dev(bio, iter->iomap.bdev); + bio = bio_alloc(iter->iomap.bdev, 1, REQ_OP_WRITE | flags, GFP_KERNEL); bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos); bio->bi_private = dio; bio->bi_end_io = iomap_dio_bio_end_io; get_page(page); __bio_add_page(bio, page, len, 0); - bio_set_op_attrs(bio, REQ_OP_WRITE, flags); iomap_dio_submit_bio(iter, dio, bio, pos); } @@ -309,14 +307,12 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter, goto out; } - bio = bio_alloc(GFP_KERNEL, nr_pages); - bio_set_dev(bio, iomap->bdev); + bio = bio_alloc(iomap->bdev, nr_pages, bio_opf, GFP_KERNEL); bio->bi_iter.bi_sector = iomap_sector(iomap, pos); bio->bi_write_hint = dio->iocb->ki_hint; bio->bi_ioprio = dio->iocb->ki_ioprio; bio->bi_private = dio; bio->bi_end_io = iomap_dio_bio_end_io; - bio->bi_opf = bio_opf; ret = bio_iov_iter_get_pages(bio, dio->submit.iter); if (unlikely(ret)) { diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 78fd136ac13b..997c81fcea34 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1980,17 +1980,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) 
bp->l_flag |= lbmREAD; - bio = bio_alloc(GFP_NOFS, 1); - + bio = bio_alloc(log->bdev, 1, REQ_OP_READ, GFP_NOFS); bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); - bio_set_dev(bio, log->bdev); - bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); bio->bi_end_io = lbmIODone; bio->bi_private = bp; - bio->bi_opf = REQ_OP_READ; /*check if journaling to disk has been disabled*/ if (log->no_integrity) { bio->bi_iter.bi_size = 0; @@ -2125,16 +2121,13 @@ static void lbmStartIO(struct lbuf * bp) jfs_info("lbmStartIO"); - bio = bio_alloc(GFP_NOFS, 1); + bio = bio_alloc(log->bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS); bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); - bio_set_dev(bio, log->bdev); - bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); bio->bi_end_io = lbmIODone; bio->bi_private = bp; - bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; /* check if journaling to disk has been disabled */ if (log->no_integrity) { diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 104ae698443e..fde1a9cf902e 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -417,12 +417,10 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) } len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage); - bio = bio_alloc(GFP_NOFS, 1); - bio_set_dev(bio, inode->i_sb->s_bdev); + bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS); bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); bio->bi_end_io = metapage_write_end_io; bio->bi_private = page; - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); /* Don't call bio_add_page yet, we may add to this vec */ bio_offset = offset; @@ -497,13 +495,12 @@ static int metapage_readpage(struct file *fp, struct page *page) if (bio) submit_bio(bio); - bio = bio_alloc(GFP_NOFS, 1); - bio_set_dev(bio, inode->i_sb->s_bdev); + bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ, + GFP_NOFS); bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); bio->bi_end_io = metapage_read_end_io; bio->bi_private = page; - bio_set_op_attrs(bio, REQ_OP_READ, 0); len = xlen << inode->i_blkbits; offset = block_offset << inode->i_blkbits; if (bio_add_page(bio, page, len, offset) < len) diff --git a/fs/mpage.c b/fs/mpage.c index 06e95d777e94..dbfc02e23d97 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -273,10 +273,10 @@ alloc_new: page)) goto out; } - args->bio = bio_alloc(gfp, bio_max_segs(args->nr_pages)); + args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), 0, + gfp); if (args->bio == NULL) goto confused; - bio_set_dev(args->bio, bdev); args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); } @@ -586,8 +586,7 @@ alloc_new: page, wbc)) goto out; } - bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS); - bio_set_dev(bio, bdev); + bio = bio_alloc(bdev, BIO_MAX_VECS, 0, GFP_NOFS); bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); wbc_init_bio(wbc, bio); diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 38e063af7e98..79a8b451791f 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -154,12 +154,10 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, retry: if (!bio) { - bio = bio_alloc(GFP_NOIO, bio_max_segs(npg)); + bio = bio_alloc(map->bdev, bio_max_segs(npg), rw, GFP_NOIO); bio->bi_iter.bi_sector = disk_addr >> SECTOR_SHIFT; - bio_set_dev(bio, map->bdev); bio->bi_end_io = end_io; bio->bi_private = par; - bio_set_op_attrs(bio, rw, 0); } if (bio_add_page(bio, 
page, *len, offset) < *len) { bio = bl_submit_bio(bio); diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 53b7c6d21cdd..4f71faacd825 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -391,8 +391,8 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf, BUG_ON(wi->nr_vecs <= 0); repeat: if (!wi->bio) { - wi->bio = bio_alloc(GFP_NOIO, wi->nr_vecs); - bio_set_dev(wi->bio, wi->nilfs->ns_bdev); + wi->bio = bio_alloc(wi->nilfs->ns_bdev, wi->nr_vecs, 0, + GFP_NOIO); wi->bio->bi_iter.bi_sector = (wi->blocknr + wi->end) << (wi->nilfs->ns_blocksize_bits - 9); } diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c index 4a255e21ecf5..0660a07c5a96 100644 --- a/fs/ntfs3/fsntfs.c +++ b/fs/ntfs3/fsntfs.c @@ -1485,15 +1485,13 @@ int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run, lbo = ((u64)lcn << cluster_bits) + off; len = ((u64)clen << cluster_bits) - off; new_bio: - new = bio_alloc(GFP_NOFS, nr_pages - page_idx); + new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS); if (bio) { bio_chain(bio, new); submit_bio(bio); } bio = new; - bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = lbo >> 9; - bio->bi_opf = op; while (len) { off = vbo & (PAGE_SIZE - 1); @@ -1584,14 +1582,12 @@ int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run) lbo = (u64)lcn << cluster_bits; len = (u64)clen << cluster_bits; new_bio: - new = bio_alloc(GFP_NOFS, BIO_MAX_VECS); + new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS); if (bio) { bio_chain(bio, new); submit_bio(bio); } bio = new; - bio_set_dev(bio, bdev); - bio->bi_opf = REQ_OP_WRITE; bio->bi_iter.bi_sector = lbo >> 9; for (;;) { diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index a17be1618bf7..ea0e70c0fce0 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -518,7 +518,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, * GFP_KERNEL that the local node can get fenced. It would be * nicest if we could pre-allocate these bios and avoid this * all together. */ - bio = bio_alloc(GFP_ATOMIC, 16); + bio = bio_alloc(reg->hr_bdev, 16, op | op_flags, GFP_ATOMIC); if (!bio) { mlog(ML_ERROR, "Could not alloc slots BIO!\n"); bio = ERR_PTR(-ENOMEM); @@ -527,10 +527,8 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, /* Must put everything in 512 byte sectors for the bio... 
*/ bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); - bio_set_dev(bio, reg->hr_bdev); bio->bi_private = wc; bio->bi_end_io = o2hb_bio_end_io; - bio_set_op_attrs(bio, op, op_flags); vec_start = (cs << bits) % PAGE_SIZE; while(cs < max_slots) { diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 2db8bcf7ff85..622c844f6d11 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -86,16 +86,17 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length, int error, i; struct bio *bio; - if (page_count <= BIO_MAX_VECS) - bio = bio_alloc(GFP_NOIO, page_count); - else + if (page_count <= BIO_MAX_VECS) { + bio = bio_alloc(sb->s_bdev, page_count, REQ_OP_READ, GFP_NOIO); + } else { bio = bio_kmalloc(GFP_NOIO, page_count); + bio_set_dev(bio, sb->s_bdev); + bio->bi_opf = REQ_OP_READ; + } if (!bio) return -ENOMEM; - bio_set_dev(bio, sb->s_bdev); - bio->bi_opf = READ; bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT); for (i = 0; i < page_count; ++i) { diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c index 667e297f59b1..eff4a9f21dcf 100644 --- a/fs/xfs/xfs_bio_io.c +++ b/fs/xfs/xfs_bio_io.c @@ -61,10 +61,9 @@ xfs_rw_bdev( if (is_vmalloc && op == REQ_OP_WRITE) flush_kernel_vmap_range(data, count); - bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left)); - bio_set_dev(bio, bdev); + bio = bio_alloc(bdev, bio_max_vecs(left), op | REQ_META | REQ_SYNC, + GFP_KERNEL); bio->bi_iter.bi_sector = sector; - bio->bi_opf = op | REQ_META | REQ_SYNC; do { struct page *page = kmem_to_page(data); @@ -74,10 +73,9 @@ xfs_rw_bdev( while (bio_add_page(bio, page, len, off) != len) { struct bio *prev = bio; - bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left)); - bio_copy_dev(bio, prev); + bio = bio_alloc(prev->bi_bdev, bio_max_vecs(left), + prev->bi_opf, GFP_KERNEL); bio->bi_iter.bi_sector = bio_end_sector(prev); - bio->bi_opf = prev->bi_opf; bio_chain(prev, bio); submit_bio(prev); diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index b45e0d50a405..ae87fd95b17e 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1440,12 +1440,10 @@ next_chunk: atomic_inc(&bp->b_io_remaining); nr_pages = bio_max_segs(total_nr_pages); - bio = bio_alloc(GFP_NOIO, nr_pages); - bio_set_dev(bio, bp->b_target->bt_bdev); + bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO); bio->bi_iter.bi_sector = sector; bio->bi_end_io = xfs_buf_bio_end_io; bio->bi_private = bp; - bio->bi_opf = op; for (; size && nr_pages; nr_pages--, page_index++) { int rbytes, nbytes = PAGE_SIZE - offset; diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index b76dfb310ab6..c0fc2c326dce 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -692,12 +692,11 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from) if (!nr_pages) return 0; - bio = bio_alloc(GFP_NOFS, nr_pages); - bio_set_dev(bio, bdev); + bio = bio_alloc(bdev, nr_pages, + REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS); bio->bi_iter.bi_sector = zi->i_zsector; bio->bi_write_hint = iocb->ki_hint; bio->bi_ioprio = iocb->ki_ioprio; - bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; if (iocb->ki_flags & IOCB_DSYNC) bio->bi_opf |= REQ_FUA; diff --git a/include/linux/bio.h b/include/linux/bio.h index 5c5ada2ebb27..be6ac92913d4 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -418,9 +418,10 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); extern struct bio_set fs_bio_set; -static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs) +static 
inline struct bio *bio_alloc(struct block_device *bdev, + unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask) { - return bio_alloc_bioset(NULL, nr_iovecs, 0, gfp_mask, &fs_bio_set); + return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set); } void submit_bio(struct bio *bio); diff --git a/kernel/power/swap.c b/kernel/power/swap.c index f1bd03129575..6c4f983cbacc 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -276,10 +276,9 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr, struct bio *bio; int error = 0; - bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1); + bio = bio_alloc(hib_resume_bdev, 1, op | op_flags, + GFP_NOIO | __GFP_HIGH); bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); - bio_set_dev(bio, hib_resume_bdev); - bio_set_op_attrs(bio, op, op_flags); if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { pr_err("Adding page to bio failed at %llu\n", diff --git a/mm/page_io.c b/mm/page_io.c index 0bf8e40f4e57..61c792f916fa 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -338,10 +338,10 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, return 0; } - bio = bio_alloc(GFP_NOIO, 1); - bio_set_dev(bio, sis->bdev); + bio = bio_alloc(sis->bdev, 1, + REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc), + GFP_NOIO); bio->bi_iter.bi_sector = swap_page_sector(page); - bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc); bio->bi_end_io = end_write_func; bio_add_page(bio, page, thp_size(page), 0); @@ -403,9 +403,7 @@ int swap_readpage(struct page *page, bool synchronous) } ret = 0; - bio = bio_alloc(GFP_KERNEL, 1); - bio_set_dev(bio, sis->bdev); - bio->bi_opf = REQ_OP_READ; + bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL); bio->bi_iter.bi_sector = swap_page_sector(page); bio->bi_end_io = end_swap_bio_read; bio_add_page(bio, page, thp_size(page), 0); -- cgit v1.2.3-71-gd317 From 49add4966d79244013fce35f95c6833fae82b8b1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jan 2022 10:11:06 +0100 Subject: block: pass a block_device and opf to bio_init Pass the block_device that we plan to use this bio for and the operation to bio_init to optimize the assignment. A NULL block_device can be passed, both for the passthrough case on a raw request_queue and to temporarily avoid refactoring some nasty code. 
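For illustration only, a minimal sketch of the conversion this enables (a hypothetical caller, not code from this series; "bdev" is assumed to be a valid struct block_device obtained elsewhere):

	struct bio bio;
	struct bio_vec bvec;

	/* Before: device and operation were assigned in separate steps. */
	bio_init(&bio, &bvec, 1);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_READ;

	/* After: both are passed at initialization time. */
	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);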
Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20220124091107.642561-19-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 27 +++++++++++++-------------- block/blk-flush.c | 4 +--- block/blk-zoned.c | 5 +---- block/fops.c | 18 +++++++++--------- drivers/block/floppy.c | 4 +--- drivers/block/zram/zram_drv.c | 5 ++--- drivers/md/bcache/io.c | 3 ++- drivers/md/bcache/journal.c | 4 +--- drivers/md/bcache/movinggc.c | 4 ++-- drivers/md/bcache/request.c | 2 +- drivers/md/bcache/super.c | 8 +++----- drivers/md/bcache/writeback.c | 4 ++-- drivers/md/dm.c | 5 ++--- drivers/md/md-multipath.c | 2 +- drivers/md/md.c | 8 +++----- drivers/md/raid5-cache.c | 2 +- drivers/md/raid5-ppl.c | 2 +- drivers/md/raid5.c | 4 ++-- drivers/nvme/target/io-cmd-bdev.c | 10 ++++------ drivers/nvme/target/passthru.c | 4 ++-- drivers/nvme/target/zns.c | 4 ++-- fs/iomap/buffered-io.c | 4 +--- fs/xfs/xfs_bio_io.c | 4 +--- fs/xfs/xfs_log.c | 14 +++++++------- fs/zonefs/super.c | 4 +--- include/linux/bio.h | 4 ++-- 26 files changed, 68 insertions(+), 91 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index b73c9babd583..b2133d86e885 100644 --- a/block/bio.c +++ b/block/bio.c @@ -249,12 +249,12 @@ static void bio_free(struct bio *bio) * they must remember to pair any call to bio_init() with bio_uninit() * when IO has completed, or when the bio is released. */ -void bio_init(struct bio *bio, struct bio_vec *table, - unsigned short max_vecs) +void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, + unsigned short max_vecs, unsigned int opf) { bio->bi_next = NULL; - bio->bi_bdev = NULL; - bio->bi_opf = 0; + bio->bi_bdev = bdev; + bio->bi_opf = opf; bio->bi_flags = 0; bio->bi_ioprio = 0; bio->bi_write_hint = 0; @@ -268,6 +268,8 @@ void bio_init(struct bio *bio, struct bio_vec *table, #ifdef CONFIG_BLK_CGROUP bio->bi_blkg = NULL; bio->bi_issue.value = 0; + if (bdev) + bio_associate_blkg(bio); #ifdef CONFIG_BLK_CGROUP_IOCOST bio->bi_iocost_cost = 0; #endif @@ -504,17 +506,14 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, if (unlikely(!bvl)) goto err_free; - bio_init(bio, bvl, nr_vecs); + bio_init(bio, bdev, bvl, nr_vecs, opf); } else if (nr_vecs) { - bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS); + bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf); } else { - bio_init(bio, NULL, 0); + bio_init(bio, bdev, NULL, 0, opf); } bio->bi_pool = bs; - if (bdev) - bio_set_dev(bio, bdev); - bio->bi_opf = opf; return bio; err_free: @@ -542,7 +541,8 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs) bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask); if (unlikely(!bio)) return NULL; - bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs); + bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs, + 0); bio->bi_pool = NULL; return bio; } @@ -1756,9 +1756,8 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev, cache->free_list = bio->bi_next; cache->nr--; put_cpu(); - bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs); - bio_set_dev(bio, bdev); - bio->bi_opf = opf; + bio_init(bio, bdev, nr_vecs ? 
bio->bi_inline_vecs : NULL, + nr_vecs, opf); bio->bi_pool = bs; bio_set_flag(bio, BIO_PERCPU_CACHE); return bio; diff --git a/block/blk-flush.c b/block/blk-flush.c index e4df894189ce..c68968724870 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -460,9 +460,7 @@ int blkdev_issue_flush(struct block_device *bdev) { struct bio bio; - bio_init(&bio, NULL, 0); - bio_set_dev(&bio, bdev); - bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; + bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH); return submit_bio_wait(&bio); } EXPORT_SYMBOL(blkdev_issue_flush); diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 5ab755d792c8..602bef54c813 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -238,10 +238,7 @@ static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask) { struct bio bio; - bio_init(&bio, NULL, 0); - bio_set_dev(&bio, bdev); - bio.bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC; - + bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC); return submit_bio_wait(&bio); } diff --git a/block/fops.c b/block/fops.c index c68359684773..3696665e586a 100644 --- a/block/fops.c +++ b/block/fops.c @@ -75,8 +75,13 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, return -ENOMEM; } - bio_init(&bio, vecs, nr_pages); - bio_set_dev(&bio, bdev); + if (iov_iter_rw(iter) == READ) { + bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ); + if (iter_is_iovec(iter)) + should_dirty = true; + } else { + bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb)); + } bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT; bio.bi_write_hint = iocb->ki_hint; bio.bi_private = current; @@ -88,14 +93,9 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, goto out; ret = bio.bi_iter.bi_size; - if (iov_iter_rw(iter) == READ) { - bio.bi_opf = REQ_OP_READ; - if (iter_is_iovec(iter)) - should_dirty = true; - } else { - bio.bi_opf = dio_bio_write_op(iocb); + if (iov_iter_rw(iter) == WRITE) task_io_account_write(ret); - } + if (iocb->ki_flags & IOCB_NOWAIT) bio.bi_opf |= REQ_NOWAIT; if (iocb->ki_flags & IOCB_HIPRI) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index e611411a934c..19c2d0327e15 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4129,15 +4129,13 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) cbdata.drive = drive; - bio_init(&bio, &bio_vec, 1); - bio_set_dev(&bio, bdev); + bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ); bio_add_page(&bio, page, block_size(bdev), 0); bio.bi_iter.bi_sector = 0; bio.bi_flags |= (1 << BIO_QUIET); bio.bi_private = &cbdata; bio.bi_end_io = floppy_rb0_cb; - bio_set_op_attrs(&bio, REQ_OP_READ, 0); init_completion(&cbdata.complete); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f3fe0ea8aa80..a3a5e1e71326 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -743,10 +743,9 @@ static ssize_t writeback_store(struct device *dev, continue; } - bio_init(&bio, &bio_vec, 1); - bio_set_dev(&bio, zram->bdev); + bio_init(&bio, zram->bdev, &bio_vec, 1, + REQ_OP_WRITE | REQ_SYNC); bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9); - bio.bi_opf = REQ_OP_WRITE | REQ_SYNC; bio_add_page(&bio, bvec.bv_page, bvec.bv_len, bvec.bv_offset); diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 9c6f9ec55b72..020712c5203f 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -26,7 +26,8 @@ struct bio *bch_bbio_alloc(struct cache_set *c) struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO); struct bio *bio = 
&b->bio; - bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb)); + bio_init(bio, NULL, bio->bi_inline_vecs, + meta_bucket_pages(&c->cache->sb), 0); return bio; } diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 61bd79babf7a..6d26c5b06e2b 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -611,11 +611,9 @@ static void do_journal_discard(struct cache *ca) atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); - bio_init(bio, bio->bi_inline_vecs, 1); - bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); + bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD); bio->bi_iter.bi_sector = bucket_to_sector(ca->set, ca->sb.d[ja->discard_idx]); - bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = journal_discard_endio; diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index b9c3d27ec093..99499d1f6e66 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -79,8 +79,8 @@ static void moving_init(struct moving_io *io) { struct bio *bio = &io->bio.bio; - bio_init(bio, bio->bi_inline_vecs, - DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS)); + bio_init(bio, NULL, bio->bi_inline_vecs, + DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0); bio_get(bio); bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index c4b7e434de8a..d4b98ebffd94 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s, { struct bio *bio = &s->bio.bio; - bio_init(bio, NULL, 0); + bio_init(bio, NULL, NULL, 0, 0); __bio_clone_fast(bio, orig_bio); /* * bi_end_io can be set separately somewhere else, e.g. 
the diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index c31a62b963f0..bf3de149d3c9 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -342,8 +342,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) down(&dc->sb_write_mutex); closure_init(cl, parent); - bio_init(bio, dc->sb_bv, 1); - bio_set_dev(bio, dc->bdev); + bio_init(bio, dc->bdev, dc->sb_bv, 1, 0); bio->bi_end_io = write_bdev_super_endio; bio->bi_private = dc; @@ -386,8 +385,7 @@ void bcache_write_super(struct cache_set *c) if (ca->sb.version < version) ca->sb.version = version; - bio_init(bio, ca->sb_bv, 1); - bio_set_dev(bio, ca->bdev); + bio_init(bio, ca->bdev, ca->sb_bv, 1, 0); bio->bi_end_io = write_super_endio; bio->bi_private = ca; @@ -2239,7 +2237,7 @@ static int cache_alloc(struct cache *ca) __module_get(THIS_MODULE); kobject_init(&ca->kobj, &bch_cache_ktype); - bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); + bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0); /* * when ca->sb.njournal_buckets is not zero, journal exists, diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index c7560f66dca8..d42301e6309d 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -292,8 +292,8 @@ static void dirty_init(struct keybuf_key *w) struct dirty_io *io = w->private; struct bio *bio = &io->bio; - bio_init(bio, bio->bi_inline_vecs, - DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)); + bio_init(bio, NULL, bio->bi_inline_vecs, + DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0); if (!io->dc->writeback_percent) bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 069e29013b6b..fa596b654c99 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1303,9 +1303,8 @@ static int __send_empty_flush(struct clone_info *ci) * need to reference it after submit. It's just used as * the basis for the clone(s). 
*/ - bio_init(&flush_bio, NULL, 0); - flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - bio_set_dev(&flush_bio, ci->io->md->disk->part0); + bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, + REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); ci->bio = &flush_bio; ci->sector_count = 0; diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index e7d6486f090f..5e15940634d8 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -121,7 +121,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio) } multipath = conf->multipaths + mp_bh->path; - bio_init(&mp_bh->bio, NULL, 0); + bio_init(&mp_bh->bio, NULL, NULL, 0, 0); __bio_clone_fast(&mp_bh->bio, bio); mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; diff --git a/drivers/md/md.c b/drivers/md/md.c index 40fc1f7e65c5..0a89f072dae0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -998,13 +998,11 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct bio bio; struct bio_vec bvec; - bio_init(&bio, &bvec, 1); - if (metadata_op && rdev->meta_bdev) - bio_set_dev(&bio, rdev->meta_bdev); + bio_init(&bio, rdev->meta_bdev, &bvec, 1, op | op_flags); else - bio_set_dev(&bio, rdev->bdev); - bio.bi_opf = op | op_flags; + bio_init(&bio, rdev->bdev, &bvec, 1, op | op_flags); + if (metadata_op) bio.bi_iter.bi_sector = sector + rdev->sb_start; else if (rdev->mddev->reshape_position != MaxSector && diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 66313adf9987..98b9ca11c28d 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -3108,7 +3108,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) INIT_LIST_HEAD(&log->io_end_ios); INIT_LIST_HEAD(&log->flushing_ios); INIT_LIST_HEAD(&log->finished_ios); - bio_init(&log->flush_bio, NULL, 0); + bio_init(&log->flush_bio, NULL, NULL, 0, 0); log->io_kc = KMEM_CACHE(r5l_io_unit, 0); if (!log->io_kc) diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 054d3bb252d4..3446797fa0ac 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -250,7 +250,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log, INIT_LIST_HEAD(&io->stripe_list); atomic_set(&io->pending_stripes, 0); atomic_set(&io->pending_flushes, 0); - bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS); + bio_init(&io->bio, NULL, io->biovec, PPL_IO_INLINE_BVECS, 0); pplhdr = page_address(io->header_page); clear_page(pplhdr); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ffe720c73b0a..a9dcc5bc9c32 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2310,8 +2310,8 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, for (i = 0; i < disks; i++) { struct r5dev *dev = &sh->dev[i]; - bio_init(&dev->req, &dev->vec, 1); - bio_init(&dev->rreq, &dev->rvec, 1); + bio_init(&dev->req, NULL, &dev->vec, 1, 0); + bio_init(&dev->rreq, NULL, &dev->rvec, 1, 0); } if (raid5_has_ppl(conf)) { diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index e092af3abc71..95c2bbb0b2f5 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -267,9 +267,8 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) if (nvmet_use_inline_bvec(req)) { bio = &req->b.inline_bio; - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); - bio_set_dev(bio, req->ns->bdev); - bio->bi_opf = op; + bio_init(bio, req->ns->bdev, req->inline_bvec, + ARRAY_SIZE(req->inline_bvec), op); } else { bio = 
bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op, GFP_KERNEL); @@ -328,11 +327,10 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req) if (!nvmet_check_transfer_len(req, 0)) return; - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); - bio_set_dev(bio, req->ns->bdev); + bio_init(bio, req->ns->bdev, req->inline_bvec, + ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH); bio->bi_private = req; bio->bi_end_io = nvmet_bio_done; - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; submit_bio(bio); } diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 38f72968c3fd..a810bf569fff 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -206,8 +206,8 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) if (nvmet_use_inline_bvec(req)) { bio = &req->p.inline_bio; - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); - bio->bi_opf = req_op(rq); + bio_init(bio, NULL, req->inline_bvec, + ARRAY_SIZE(req->inline_bvec), req_op(rq)); } else { bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq), GFP_KERNEL); diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 62c53e8f26d3..3e421217a7ad 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -552,8 +552,8 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) if (nvmet_use_inline_bvec(req)) { bio = &req->z.inline_bio; - bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); - bio->bi_opf = op; + bio_init(bio, req->ns->bdev, req->inline_bvec, + ARRAY_SIZE(req->inline_bvec), op); } else { bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL); } diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 70f3657a6ec0..491534e90861 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -549,10 +549,8 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio, struct bio_vec bvec; struct bio bio; - bio_init(&bio, &bvec, 1); - bio.bi_opf = REQ_OP_READ; + bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ); bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); - bio_set_dev(&bio, iomap->bdev); bio_add_folio(&bio, folio, plen, poff); return submit_bio_wait(&bio); } diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c index eff4a9f21dcf..32fa02945f73 100644 --- a/fs/xfs/xfs_bio_io.c +++ b/fs/xfs/xfs_bio_io.c @@ -36,9 +36,7 @@ xfs_flush_bdev_async( return; } - bio_init(bio, NULL, 0); - bio_set_dev(bio, bdev); - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; + bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); bio->bi_private = done; bio->bi_end_io = xfs_flush_bdev_async_endio; diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 89fec9a18c34..16f9edbda4eb 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1883,19 +1883,19 @@ xlog_write_iclog( return; } - bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE)); - bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev); - iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; - iclog->ic_bio.bi_end_io = xlog_bio_end_io; - iclog->ic_bio.bi_private = iclog; - /* * We use REQ_SYNC | REQ_IDLE here to tell the block layer the are more * IOs coming immediately after this one. This prevents the block layer * writeback throttle from throttling log writes behind background * metadata writeback and causing priority inversions. 
*/ - iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE; + bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec, + howmany(count, PAGE_SIZE), + REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE); + iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; + iclog->ic_bio.bi_end_io = xlog_bio_end_io; + iclog->ic_bio.bi_private = iclog; + if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) { iclog->ic_bio.bi_opf |= REQ_PREFLUSH; /* diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index c0fc2c326dce..d331b52592a0 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -1540,10 +1540,8 @@ static int zonefs_read_super(struct super_block *sb) if (!page) return -ENOMEM; - bio_init(&bio, &bio_vec, 1); + bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ); bio.bi_iter.bi_sector = 0; - bio.bi_opf = REQ_OP_READ; - bio_set_dev(&bio, sb->s_bdev); bio_add_page(&bio, page, PAGE_SIZE, 0); ret = submit_bio_wait(&bio); diff --git a/include/linux/bio.h b/include/linux/bio.h index be6ac92913d4..41bedf727f59 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -456,8 +456,8 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs) struct request_queue; extern int submit_bio_wait(struct bio *bio); -extern void bio_init(struct bio *bio, struct bio_vec *table, - unsigned short max_vecs); +void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, + unsigned short max_vecs, unsigned int opf); extern void bio_uninit(struct bio *); extern void bio_reset(struct bio *); void bio_chain(struct bio *, struct bio *); -- cgit v1.2.3-71-gd317 From a7c50c940477bae89fb2b4f51bd969a2d95d7512 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jan 2022 10:11:07 +0100 Subject: block: pass a block_device and opf to bio_reset Pass the block_device that we plan to use this bio for and the operation to bio_reset to optimize the assignment. A NULL block_device can be passed, both for the passthrough case on a raw request_queue and to temporarily avoid refactoring some nasty code. Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20220124091107.642561-20-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 6 +++++- drivers/block/pktcdvd.c | 8 ++------ drivers/md/bcache/journal.c | 12 ++++-------- drivers/md/bcache/request.c | 4 ++-- drivers/md/raid1.c | 5 ++--- drivers/md/raid10.c | 8 +++----- drivers/md/raid5-cache.c | 9 +++------ drivers/md/raid5.c | 8 ++++---- fs/btrfs/disk-io.c | 4 +--- fs/crypto/bio.c | 8 ++------ include/linux/bio.h | 9 +-------- 11 files changed, 29 insertions(+), 52 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index b2133d86e885..03cefe81950f 100644 --- a/block/bio.c +++ b/block/bio.c @@ -295,6 +295,8 @@ EXPORT_SYMBOL(bio_init); /** * bio_reset - reinitialize a bio * @bio: bio to reset + * @bdev: block device to use the bio for + * @opf: operation and flags for bio * * Description: * After calling bio_reset(), @bio will be in the same state as a freshly @@ -302,11 +304,13 @@ EXPORT_SYMBOL(bio_init); * preserved are the ones that are initialized by bio_alloc_bioset(). See * comment in struct bio.
*/ -void bio_reset(struct bio *bio) +void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf) { bio_uninit(bio); memset(bio, 0, BIO_RESET_BYTES); atomic_set(&bio->__bi_remaining, 1); + bio->bi_bdev = bdev; + bio->bi_opf = opf; } EXPORT_SYMBOL(bio_reset); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 2b6b70a39e76..3aa595442946 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1020,9 +1020,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) continue; bio = pkt->r_bios[f]; - bio_reset(bio); + bio_reset(bio, pd->bdev, REQ_OP_READ); bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); - bio_set_dev(bio, pd->bdev); bio->bi_end_io = pkt_end_io_read; bio->bi_private = pkt; @@ -1034,7 +1033,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) BUG(); atomic_inc(&pkt->io_wait); - bio_set_op_attrs(bio, REQ_OP_READ, 0); pkt_queue_bio(pd, bio); frames_read++; } @@ -1235,9 +1233,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) { int f; - bio_reset(pkt->w_bio); + bio_reset(pkt->w_bio, pd->bdev, REQ_OP_WRITE); pkt->w_bio->bi_iter.bi_sector = pkt->sector; - bio_set_dev(pkt->w_bio, pd->bdev); pkt->w_bio->bi_end_io = pkt_end_io_packet_write; pkt->w_bio->bi_private = pkt; @@ -1270,7 +1267,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) /* Start the write request */ atomic_set(&pkt->io_wait, 1); - bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0); pkt_queue_bio(pd, pkt->w_bio); } diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 6d26c5b06e2b..7c2ca52ca3e4 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -53,14 +53,12 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, reread: left = ca->sb.bucket_size - offset; len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS); - bio_reset(bio); + bio_reset(bio, ca->bdev, REQ_OP_READ); bio->bi_iter.bi_sector = bucket + offset; - bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = len << 9; bio->bi_end_io = journal_read_endio; bio->bi_private = &cl; - bio_set_op_attrs(bio, REQ_OP_READ, 0); bch_bio_map(bio, data); closure_bio_submit(ca->set, bio, &cl); @@ -771,16 +769,14 @@ static void journal_write_unlocked(struct closure *cl) atomic_long_add(sectors, &ca->meta_sectors_written); - bio_reset(bio); + bio_reset(bio, ca->bdev, REQ_OP_WRITE | + REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA); + bch_bio_map(bio, w->data); bio->bi_iter.bi_sector = PTR_OFFSET(k, i); - bio_set_dev(bio, ca->bdev); bio->bi_iter.bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; bio->bi_private = w; - bio_set_op_attrs(bio, REQ_OP_WRITE, - REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA); - bch_bio_map(bio, w->data); trace_bcache_journal_write(bio, w->data->keys); bio_list_add(&list, bio); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index d4b98ebffd94..7ba59d08ed87 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -831,11 +831,11 @@ static void cached_dev_read_done(struct closure *cl) */ if (s->iop.bio) { - bio_reset(s->iop.bio); + bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ); s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; - bio_copy_dev(s->iop.bio, s->cache_miss); s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; + bio_clone_blkg_association(s->iop.bio, s->cache_miss); bch_bio_map(s->iop.bio, NULL); 
bio_copy_data(s->cache_miss, s->iop.bio); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 43276f8fdc81..e7710fb5befb 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2166,11 +2166,10 @@ static void process_checks(struct r1bio *r1_bio) continue; /* fixup the bio for reuse, but preserve errno */ status = b->bi_status; - bio_reset(b); + bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ); b->bi_status = status; b->bi_iter.bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; - bio_set_dev(b, conf->mirrors[i].rdev->bdev); b->bi_end_io = end_sync_read; rp->raid_bio = r1_bio; b->bi_private = rp; @@ -2651,7 +2650,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) for (i = conf->poolinfo->raid_disks; i--; ) { bio = r1bio->bios[i]; rps = bio->bi_private; - bio_reset(bio); + bio_reset(bio, NULL, 0); bio->bi_private = rps; } r1bio->master_bio = NULL; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index cb7c58050708..da07bcbc06d0 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2422,7 +2422,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) * bi_vecs, as the read request might have corrupted these */ rp = get_resync_pages(tbio); - bio_reset(tbio); + bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE); md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size); @@ -2430,7 +2430,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) tbio->bi_private = rp; tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; tbio->bi_end_io = end_sync_write; - bio_set_op_attrs(tbio, REQ_OP_WRITE, 0); bio_copy_data(tbio, fbio); @@ -2441,7 +2440,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) tbio->bi_opf |= MD_FAILFAST; tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; - bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); submit_bio_noacct(tbio); } @@ -3160,12 +3158,12 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) for (i = 0; i < nalloc; i++) { bio = r10bio->devs[i].bio; rp = bio->bi_private; - bio_reset(bio); + bio_reset(bio, NULL, 0); bio->bi_private = rp; bio = r10bio->devs[i].repl_bio; if (bio) { rp = bio->bi_private; - bio_reset(bio); + bio_reset(bio, NULL, 0); bio->bi_private = rp; } } diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 98b9ca11c28d..86e2bb89d9c7 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1301,10 +1301,9 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log) if (!do_flush) return; - bio_reset(&log->flush_bio); - bio_set_dev(&log->flush_bio, log->rdev->bdev); + bio_reset(&log->flush_bio, log->rdev->bdev, + REQ_OP_WRITE | REQ_PREFLUSH); log->flush_bio.bi_end_io = r5l_log_flush_endio; - log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; submit_bio(&log->flush_bio); } @@ -1678,9 +1677,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, struct r5l_recovery_ctx *ctx, sector_t offset) { - bio_reset(ctx->ra_bio); - bio_set_dev(ctx->ra_bio, log->rdev->bdev); - bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); + bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ); ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; ctx->valid_pages = 0; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a9dcc5bc9c32..7c119208a214 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2677,7 +2677,7 @@ static void raid5_end_read_request(struct bio * bi) (unsigned long long)sh->sector, 
i, atomic_read(&sh->count), bi->bi_status); if (i == disks) { - bio_reset(bi); + bio_reset(bi, NULL, 0); BUG(); return; } @@ -2785,7 +2785,7 @@ static void raid5_end_read_request(struct bio * bi) } } rdev_dec_pending(rdev, conf->mddev); - bio_reset(bi); + bio_reset(bi, NULL, 0); clear_bit(R5_LOCKED, &sh->dev[i].flags); set_bit(STRIPE_HANDLE, &sh->state); raid5_release_stripe(sh); @@ -2823,7 +2823,7 @@ static void raid5_end_write_request(struct bio *bi) (unsigned long long)sh->sector, i, atomic_read(&sh->count), bi->bi_status); if (i == disks) { - bio_reset(bi); + bio_reset(bi, NULL, 0); BUG(); return; } @@ -2860,7 +2860,7 @@ static void raid5_end_write_request(struct bio *bi) if (sh->batch_head && bi->bi_status && !replacement) set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); - bio_reset(bi); + bio_reset(bi, NULL, 0); if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) clear_bit(R5_LOCKED, &sh->dev[i].flags); set_bit(STRIPE_HANDLE, &sh->state); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f45aa506f9a6..505ba21230b1 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4154,10 +4154,8 @@ static void write_dev_flush(struct btrfs_device *device) return; #endif - bio_reset(bio); + bio_reset(bio, device->bdev, REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH); bio->bi_end_io = btrfs_end_empty_barrier; - bio_set_dev(bio, device->bdev); - bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; init_completion(&device->flush_wait); bio->bi_private = &device->flush_wait; diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 755e985a42e0..2217fe5ece6f 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -80,9 +80,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode, err = submit_bio_wait(bio); if (err) goto out; - bio_reset(bio); - bio_set_dev(bio, inode->i_sb->s_bdev); - bio->bi_opf = REQ_OP_WRITE; + bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE); num_pages = 0; } } @@ -181,9 +179,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, err = submit_bio_wait(bio); if (err) goto out; - bio_reset(bio); - bio_set_dev(bio, inode->i_sb->s_bdev); - bio->bi_opf = REQ_OP_WRITE; + bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE); } while (len != 0); err = 0; out: diff --git a/include/linux/bio.h b/include/linux/bio.h index 41bedf727f59..18cfe5bb41ea 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -459,7 +459,7 @@ extern int submit_bio_wait(struct bio *bio); void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, unsigned short max_vecs, unsigned int opf); extern void bio_uninit(struct bio *); -extern void bio_reset(struct bio *); +void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf); void bio_chain(struct bio *, struct bio *); int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off); @@ -517,13 +517,6 @@ static inline void bio_set_dev(struct bio *bio, struct block_device *bdev) bio_associate_blkg(bio); } -static inline void bio_copy_dev(struct bio *dst, struct bio *src) -{ - bio_clear_flag(dst, BIO_REMAPPED); - dst->bi_bdev = src->bi_bdev; - bio_clone_blkg_association(dst, src); -} - /* * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. * -- cgit v1.2.3-71-gd317 From b1f866b013e6e5583f2f0bf4a61d13eddb9a1799 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 27 Jan 2022 08:05:48 +0100 Subject: block: remove blk_needs_flush_plug blk_needs_flush_plug fails to account for the cb_list, which needs flushing as well. 
Remove it and just check if there is a plug instead of poking into the internals of the plug structure. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220127070549.1377856-1-hch@lst.de Signed-off-by: Jens Axboe --- fs/fs-writeback.c | 2 +- include/linux/blkdev.h | 13 ------------- kernel/exit.c | 2 +- kernel/sched/core.c | 2 +- 4 files changed, 3 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f8d7fe6db989..f4ce38f6fc31 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -2301,7 +2301,7 @@ void wakeup_flusher_threads(enum wb_reason reason) /* * If we are expecting writeback progress we must submit plugged IO. */ - if (blk_needs_flush_plug(current)) + if (current->plug) blk_flush_plug(current->plug, true); rcu_read_lock(); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 99a4384bb8a5..f902a1c2fac0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1055,14 +1055,6 @@ extern void blk_finish_plug(struct blk_plug *); void blk_flush_plug(struct blk_plug *plug, bool from_schedule); -static inline bool blk_needs_flush_plug(struct task_struct *tsk) -{ - struct blk_plug *plug = tsk->plug; - - return plug && - (plug->mq_list || !list_empty(&plug->cb_list)); -} - int blkdev_issue_flush(struct block_device *bdev); long nr_blockdev_pages(void); #else /* CONFIG_BLOCK */ @@ -1086,11 +1078,6 @@ static inline void blk_flush_plug(struct blk_plug *plug, bool async) { } -static inline bool blk_needs_flush_plug(struct task_struct *tsk) -{ - return false; -} - static inline int blkdev_issue_flush(struct block_device *bdev) { return 0; diff --git a/kernel/exit.c b/kernel/exit.c index b00a25bb4ab9..11fc6c9df9f2 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -735,7 +735,7 @@ void __noreturn do_exit(long code) struct task_struct *tsk = current; int group_dead; - WARN_ON(blk_needs_flush_plug(tsk)); + WARN_ON(tsk->plug); /* * If do_dead is called because this processes oopsed, it's possible diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 848eaa0efe0e..3487bb92d1f2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6344,7 +6344,7 @@ static inline void sched_submit_work(struct task_struct *tsk) * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. */ - if (blk_needs_flush_plug(tsk)) + if (tsk->plug) blk_flush_plug(tsk->plug, true); } -- cgit v1.2.3-71-gd317 From aa8dcccaf32bfdc09f2aff089d5d60c37da5b7b5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 27 Jan 2022 08:05:49 +0100 Subject: block: check that there is a plug in blk_flush_plug Rename blk_flush_plug to __blk_flush_plug and add a wrapper that includes the NULL check instead of open coding that check everywhere. 
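As a usage sketch (hypothetical caller, not part of this patch): with the NULL check centralized in the new inline wrapper, call sites may invoke blk_flush_plug() unconditionally:

	/* Hypothetical example; blk_flush_plug() is a no-op when the
	 * task has no plug, so no current->plug test is needed here. */
	static void example_submit_pending_io(void)
	{
		blk_flush_plug(current->plug, true);
		io_schedule();
	}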
Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20220127070549.1377856-2-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-core.c | 7 +++---- fs/fs-writeback.c | 6 ++---- include/linux/blkdev.h | 7 ++++++- kernel/sched/core.c | 7 ++----- 4 files changed, 13 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index d93e3bb9a769..61f6a0dc4511 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -991,8 +991,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) return 0; - if (current->plug) - blk_flush_plug(current->plug, false); + blk_flush_plug(current->plug, false); if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)) return 0; @@ -1274,7 +1273,7 @@ struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, } EXPORT_SYMBOL(blk_check_plugged); -void blk_flush_plug(struct blk_plug *plug, bool from_schedule) +void __blk_flush_plug(struct blk_plug *plug, bool from_schedule) { if (!list_empty(&plug->cb_list)) flush_plug_callbacks(plug, from_schedule); @@ -1303,7 +1302,7 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule) void blk_finish_plug(struct blk_plug *plug) { if (plug == current->plug) { - blk_flush_plug(plug, false); + __blk_flush_plug(plug, false); current->plug = NULL; } } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f4ce38f6fc31..33d54c9fbefc 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1903,8 +1903,7 @@ static long writeback_sb_inodes(struct super_block *sb, * unplug, so get our IOs out the door before we * give up the CPU. */ - if (current->plug) - blk_flush_plug(current->plug, false); + blk_flush_plug(current->plug, false); cond_resched(); } @@ -2301,8 +2300,7 @@ void wakeup_flusher_threads(enum wb_reason reason) /* * If we are expecting writeback progress we must submit plugged IO. */ - if (current->plug) - blk_flush_plug(current->plug, true); + blk_flush_plug(current->plug, true); rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f902a1c2fac0..654163d3b903 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1053,7 +1053,12 @@ extern void blk_start_plug(struct blk_plug *); extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short); extern void blk_finish_plug(struct blk_plug *); -void blk_flush_plug(struct blk_plug *plug, bool from_schedule); +void __blk_flush_plug(struct blk_plug *plug, bool from_schedule); +static inline void blk_flush_plug(struct blk_plug *plug, bool async) +{ + if (plug) + __blk_flush_plug(plug, async); +} int blkdev_issue_flush(struct block_device *bdev); long nr_blockdev_pages(void); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3487bb92d1f2..46152982e400 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6344,8 +6344,7 @@ static inline void sched_submit_work(struct task_struct *tsk) * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. 
*/ - if (tsk->plug) - blk_flush_plug(tsk->plug, true); + blk_flush_plug(tsk->plug, true); } static void sched_update_worker(struct task_struct *tsk) @@ -8371,9 +8370,7 @@ int io_schedule_prepare(void) int old_iowait = current->in_iowait; current->in_iowait = 1; - if (current->plug) - blk_flush_plug(current->plug, true); - + blk_flush_plug(current->plug, true); return old_iowait; } -- cgit v1.2.3-71-gd317 From b42c1fc3d55e077d36718ad9800d89100b2aff81 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 27 Jan 2022 07:41:25 +0100 Subject: block: fix the kerneldoc for bio_end_io_acct Document the actually existing parameter name. Reported-by: Stephen Rothwell Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220127064125.1314347-1-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 654163d3b903..3bfc75a2a450 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1520,7 +1520,7 @@ void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, /** * bio_end_io_acct - end I/O accounting for bio based drivers * @bio: bio to end account for - * @start: start time returned by bio_start_io_acct() + * @start_time: start time returned by bio_start_io_acct() */ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) { -- cgit v1.2.3-71-gd317 From e1d2699b96793d19388e302fa095e0da2c145701 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 18 Jan 2022 22:10:52 -0500 Subject: NFS: Avoid duplicate uncached readdir calls on eof If we've reached the end of the directory, then cache that information in the context so that we don't need to do an uncached readdir in order to rediscover that fact. 
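The essence of the fix, as a simplified sketch of the fs/nfs/dir.c change below: the eof flag saved in the open-dir context lets the next readdir call return straight away instead of probing the server again:

	/* Simplified from the hunk below: bail out early once the
	 * context has already seen end-of-directory. */
	if (desc->eof) {
		res = 0;
		goto out_free;
	}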
Fixes: 794092c57f89 ("NFS: Do uncached readdir when we're seeking a cookie in an empty page cache") Signed-off-by: Trond Myklebust Signed-off-by: Anna Schumaker --- fs/nfs/dir.c | 20 +++++++++++++++----- include/linux/nfs_fs.h | 1 + 2 files changed, 16 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a3de586d21e2..7bc7cf6b26f0 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -80,6 +80,7 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir ctx->dir_cookie = 0; ctx->dup_cookie = 0; ctx->page_index = 0; + ctx->eof = false; spin_lock(&dir->i_lock); if (list_empty(&nfsi->open_files) && (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER)) @@ -168,6 +169,7 @@ struct nfs_readdir_descriptor { unsigned int cache_entry_index; signed char duped; bool plus; + bool eob; bool eof; }; @@ -989,7 +991,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc, ent = &array->array[i]; if (!dir_emit(desc->ctx, ent->name, ent->name_len, nfs_compat_user_ino64(ent->ino), ent->d_type)) { - desc->eof = true; + desc->eob = true; break; } memcpy(desc->verf, verf, sizeof(desc->verf)); @@ -1005,7 +1007,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc, desc->duped = 1; } if (array->page_is_eof) - desc->eof = true; + desc->eof = !desc->eob; kunmap(desc->page); dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n", @@ -1048,7 +1050,7 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc) status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz); - for (i = 0; !desc->eof && i < sz && arrays[i]; i++) { + for (i = 0; !desc->eob && i < sz && arrays[i]; i++) { desc->page = arrays[i]; nfs_do_filldir(desc, verf); } @@ -1107,9 +1109,15 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->duped = dir_ctx->duped; page_index = dir_ctx->page_index; desc->attr_gencount = dir_ctx->attr_gencount; + desc->eof = dir_ctx->eof; memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); spin_unlock(&file->f_lock); + if (desc->eof) { + res = 0; + goto out_free; + } + if (test_and_clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags) && list_is_singular(&nfsi->open_files)) invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1); @@ -1143,7 +1151,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) nfs_do_filldir(desc, nfsi->cookieverf); nfs_readdir_page_unlock_and_put_cached(desc); - } while (!desc->eof); + } while (!desc->eob && !desc->eof); spin_lock(&file->f_lock); dir_ctx->dir_cookie = desc->dir_cookie; @@ -1151,9 +1159,10 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) dir_ctx->duped = desc->duped; dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->page_index; + dir_ctx->eof = desc->eof; memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf)); spin_unlock(&file->f_lock); - +out_free: kfree(desc); out: @@ -1195,6 +1204,7 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence) if (offset == 0) memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf)); dir_ctx->duped = 0; + dir_ctx->eof = false; } spin_unlock(&filp->f_lock); return offset; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 02aa49323d1d..68f81d8d36de 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -107,6 +107,7 @@ struct nfs_open_dir_context { __u64 dup_cookie; pgoff_t page_index; signed char duped; + bool eof; }; /* -- cgit v1.2.3-71-gd317 From 2ea88716369ac9a7486a8cb309d6bf1239ea156c Mon 
Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Sun, 23 Jan 2022 17:27:47 +0100 Subject: libceph: make recv path in secure mode work the same as send path The recv path of secure mode is intertwined with that of crc mode. While it's slightly more efficient that way (the ciphertext is read into the destination buffer and decrypted in place, thus avoiding two potentially heavy memory allocations for the bounce buffer and the corresponding sg array), it isn't really amenable to changes. Sacrifice that edge and align with the send path which always uses a full-sized bounce buffer (currently there is no other way -- if the kernel crypto API ever grows support for streaming (piecewise) en/decryption for GCM [1], we would be able to easily take advantage of that on both sides). [1] https://lore.kernel.org/all/20141225202830.GA18794@gondor.apana.org.au/ Signed-off-by: Ilya Dryomov Reviewed-by: Jeff Layton --- include/linux/ceph/messenger.h | 4 + net/ceph/messenger_v2.c | 216 +++++++++++++++++++++++++++++------------ 2 files changed, 158 insertions(+), 62 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index ff99ce094cfa..6c6b6ea52bb8 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -383,6 +383,10 @@ struct ceph_connection_v2_info { struct ceph_gcm_nonce in_gcm_nonce; struct ceph_gcm_nonce out_gcm_nonce; + struct page **in_enc_pages; + int in_enc_page_cnt; + int in_enc_resid; + int in_enc_i; struct page **out_enc_pages; int out_enc_page_cnt; int out_enc_resid; diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c index c4099b641b38..2ea00489e691 100644 --- a/net/ceph/messenger_v2.c +++ b/net/ceph/messenger_v2.c @@ -57,8 +57,9 @@ #define IN_S_HANDLE_CONTROL_REMAINDER 3 #define IN_S_PREPARE_READ_DATA 4 #define IN_S_PREPARE_READ_DATA_CONT 5 -#define IN_S_HANDLE_EPILOGUE 6 -#define IN_S_FINISH_SKIP 7 +#define IN_S_PREPARE_READ_ENC_PAGE 6 +#define IN_S_HANDLE_EPILOGUE 7 +#define IN_S_FINISH_SKIP 8 #define OUT_S_QUEUE_DATA 1 #define OUT_S_QUEUE_DATA_CONT 2 @@ -1032,22 +1033,41 @@ static int decrypt_control_remainder(struct ceph_connection *con) padded_len(rem_len) + CEPH_GCM_TAG_LEN); } -static int decrypt_message(struct ceph_connection *con) +static int decrypt_tail(struct ceph_connection *con) { + struct sg_table enc_sgt = {}; struct sg_table sgt = {}; + int tail_len; int ret; + tail_len = tail_onwire_len(con->in_msg, true); + ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages, + con->v2.in_enc_page_cnt, 0, tail_len, + GFP_NOIO); + if (ret) + goto out; + ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf), MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf), con->v2.in_buf, true); if (ret) goto out; - ret = gcm_crypt(con, false, sgt.sgl, sgt.sgl, - tail_onwire_len(con->in_msg, true)); + dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con, + con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents); + ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len); + if (ret) + goto out; + + WARN_ON(!con->v2.in_enc_page_cnt); + ceph_release_page_vector(con->v2.in_enc_pages, + con->v2.in_enc_page_cnt); + con->v2.in_enc_pages = NULL; + con->v2.in_enc_page_cnt = 0; out: sg_free_table(&sgt); + sg_free_table(&enc_sgt); return ret; } @@ -1737,8 +1757,7 @@ static void prepare_read_data(struct ceph_connection *con) { struct bio_vec bv; - if (!con_secure(con)) - con->in_data_crc = -1; + con->in_data_crc = -1; ceph_msg_data_cursor_init(&con->v2.in_cursor, 
con->in_msg, data_len(con->in_msg)); @@ -1751,11 +1770,10 @@ static void prepare_read_data_cont(struct ceph_connection *con) { struct bio_vec bv; - if (!con_secure(con)) - con->in_data_crc = ceph_crc32c_page(con->in_data_crc, - con->v2.in_bvec.bv_page, - con->v2.in_bvec.bv_offset, - con->v2.in_bvec.bv_len); + con->in_data_crc = ceph_crc32c_page(con->in_data_crc, + con->v2.in_bvec.bv_page, + con->v2.in_bvec.bv_offset, + con->v2.in_bvec.bv_len); ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len); if (con->v2.in_cursor.total_resid) { @@ -1766,21 +1784,94 @@ static void prepare_read_data_cont(struct ceph_connection *con) } /* - * We've read all data. Prepare to read data padding (if any) - * and epilogue. + * We've read all data. Prepare to read epilogue. */ reset_in_kvecs(con); - if (con_secure(con)) { - if (need_padding(data_len(con->in_msg))) - add_in_kvec(con, DATA_PAD(con->v2.in_buf), - padding_len(data_len(con->in_msg))); - add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_SECURE_LEN); + add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN); + con->v2.in_state = IN_S_HANDLE_EPILOGUE; +} + +static void prepare_read_tail_plain(struct ceph_connection *con) +{ + struct ceph_msg *msg = con->in_msg; + + if (!front_len(msg) && !middle_len(msg)) { + WARN_ON(!data_len(msg)); + prepare_read_data(con); + return; + } + + reset_in_kvecs(con); + if (front_len(msg)) { + add_in_kvec(con, msg->front.iov_base, front_len(msg)); + WARN_ON(msg->front.iov_len != front_len(msg)); + } + if (middle_len(msg)) { + add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg)); + WARN_ON(msg->middle->vec.iov_len != middle_len(msg)); + } + + if (data_len(msg)) { + con->v2.in_state = IN_S_PREPARE_READ_DATA; } else { add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN); + con->v2.in_state = IN_S_HANDLE_EPILOGUE; + } +} + +static void prepare_read_enc_page(struct ceph_connection *con) +{ + struct bio_vec bv; + + dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i, + con->v2.in_enc_resid); + WARN_ON(!con->v2.in_enc_resid); + + bv.bv_page = con->v2.in_enc_pages[con->v2.in_enc_i]; + bv.bv_offset = 0; + bv.bv_len = min(con->v2.in_enc_resid, (int)PAGE_SIZE); + + set_in_bvec(con, &bv); + con->v2.in_enc_i++; + con->v2.in_enc_resid -= bv.bv_len; + + if (con->v2.in_enc_resid) { + con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE; + return; } + + /* + * We are set to read the last piece of ciphertext (ending + * with epilogue) + auth tag. 
+ */ + WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt); con->v2.in_state = IN_S_HANDLE_EPILOGUE; } +static int prepare_read_tail_secure(struct ceph_connection *con) +{ + struct page **enc_pages; + int enc_page_cnt; + int tail_len; + + tail_len = tail_onwire_len(con->in_msg, true); + WARN_ON(!tail_len); + + enc_page_cnt = calc_pages_for(0, tail_len); + enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO); + if (IS_ERR(enc_pages)) + return PTR_ERR(enc_pages); + + WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt); + con->v2.in_enc_pages = enc_pages; + con->v2.in_enc_page_cnt = enc_page_cnt; + con->v2.in_enc_resid = tail_len; + con->v2.in_enc_i = 0; + + prepare_read_enc_page(con); + return 0; +} + static void __finish_skip(struct ceph_connection *con) { con->in_seq++; @@ -2589,46 +2680,26 @@ static int __handle_control(struct ceph_connection *con, void *p) } msg = con->in_msg; /* set in process_message_header() */ - if (!front_len(msg) && !middle_len(msg)) { - if (!data_len(msg)) - return process_message(con); - - prepare_read_data(con); - return 0; - } - - reset_in_kvecs(con); if (front_len(msg)) { WARN_ON(front_len(msg) > msg->front_alloc_len); - add_in_kvec(con, msg->front.iov_base, front_len(msg)); msg->front.iov_len = front_len(msg); - - if (con_secure(con) && need_padding(front_len(msg))) - add_in_kvec(con, FRONT_PAD(con->v2.in_buf), - padding_len(front_len(msg))); } else { msg->front.iov_len = 0; } if (middle_len(msg)) { WARN_ON(middle_len(msg) > msg->middle->alloc_len); - add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg)); msg->middle->vec.iov_len = middle_len(msg); - - if (con_secure(con) && need_padding(middle_len(msg))) - add_in_kvec(con, MIDDLE_PAD(con->v2.in_buf), - padding_len(middle_len(msg))); } else if (msg->middle) { msg->middle->vec.iov_len = 0; } - if (data_len(msg)) { - con->v2.in_state = IN_S_PREPARE_READ_DATA; - } else { - add_in_kvec(con, con->v2.in_buf, - con_secure(con) ? 
CEPH_EPILOGUE_SECURE_LEN : - CEPH_EPILOGUE_PLAIN_LEN); - con->v2.in_state = IN_S_HANDLE_EPILOGUE; - } + if (!front_len(msg) && !middle_len(msg) && !data_len(msg)) + return process_message(con); + + if (con_secure(con)) + return prepare_read_tail_secure(con); + + prepare_read_tail_plain(con); return 0; } @@ -2717,7 +2788,7 @@ static int handle_epilogue(struct ceph_connection *con) int ret; if (con_secure(con)) { - ret = decrypt_message(con); + ret = decrypt_tail(con); if (ret) { if (ret == -EBADMSG) con->error_msg = "integrity error, bad epilogue auth tag"; @@ -2792,6 +2863,10 @@ static int populate_in_iter(struct ceph_connection *con) prepare_read_data_cont(con); ret = 0; break; + case IN_S_PREPARE_READ_ENC_PAGE: + prepare_read_enc_page(con); + ret = 0; + break; case IN_S_HANDLE_EPILOGUE: ret = handle_epilogue(con); break; @@ -3326,20 +3401,16 @@ void ceph_con_v2_revoke(struct ceph_connection *con) static void revoke_at_prepare_read_data(struct ceph_connection *con) { - int remaining; /* data + [data padding] + epilogue */ + int remaining; int resid; + WARN_ON(con_secure(con)); WARN_ON(!data_len(con->in_msg)); WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter)); resid = iov_iter_count(&con->v2.in_iter); WARN_ON(!resid); - if (con_secure(con)) - remaining = padded_len(data_len(con->in_msg)) + - CEPH_EPILOGUE_SECURE_LEN; - else - remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN; - + remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN; dout("%s con %p resid %d remaining %d\n", __func__, con, resid, remaining); con->v2.in_iter.count -= resid; @@ -3350,8 +3421,9 @@ static void revoke_at_prepare_read_data(struct ceph_connection *con) static void revoke_at_prepare_read_data_cont(struct ceph_connection *con) { int recved, resid; /* current piece of data */ - int remaining; /* [data padding] + epilogue */ + int remaining; + WARN_ON(con_secure(con)); WARN_ON(!data_len(con->in_msg)); WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter)); resid = iov_iter_count(&con->v2.in_iter); @@ -3363,12 +3435,7 @@ static void revoke_at_prepare_read_data_cont(struct ceph_connection *con) ceph_msg_data_advance(&con->v2.in_cursor, recved); WARN_ON(resid > con->v2.in_cursor.total_resid); - if (con_secure(con)) - remaining = padding_len(data_len(con->in_msg)) + - CEPH_EPILOGUE_SECURE_LEN; - else - remaining = CEPH_EPILOGUE_PLAIN_LEN; - + remaining = CEPH_EPILOGUE_PLAIN_LEN; dout("%s con %p total_resid %zu remaining %d\n", __func__, con, con->v2.in_cursor.total_resid, remaining); con->v2.in_iter.count -= resid; @@ -3376,11 +3443,26 @@ static void revoke_at_prepare_read_data_cont(struct ceph_connection *con) con->v2.in_state = IN_S_FINISH_SKIP; } +static void revoke_at_prepare_read_enc_page(struct ceph_connection *con) +{ + int resid; /* current enc page (not necessarily data) */ + + WARN_ON(!con_secure(con)); + WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter)); + resid = iov_iter_count(&con->v2.in_iter); + WARN_ON(!resid || resid > con->v2.in_bvec.bv_len); + + dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid, + con->v2.in_enc_resid); + con->v2.in_iter.count -= resid; + set_in_skip(con, resid + con->v2.in_enc_resid); + con->v2.in_state = IN_S_FINISH_SKIP; +} + static void revoke_at_handle_epilogue(struct ceph_connection *con) { int resid; - WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter)); resid = iov_iter_count(&con->v2.in_iter); WARN_ON(!resid); @@ -3399,6 +3481,9 @@ void ceph_con_v2_revoke_incoming(struct ceph_connection *con) case IN_S_PREPARE_READ_DATA_CONT: revoke_at_prepare_read_data_cont(con); break; + 
case IN_S_PREPARE_READ_ENC_PAGE: + revoke_at_prepare_read_enc_page(con); + break; case IN_S_HANDLE_EPILOGUE: revoke_at_handle_epilogue(con); break; @@ -3432,6 +3517,13 @@ void ceph_con_v2_reset_protocol(struct ceph_connection *con) clear_out_sign_kvecs(con); free_conn_bufs(con); + if (con->v2.in_enc_pages) { + WARN_ON(!con->v2.in_enc_page_cnt); + ceph_release_page_vector(con->v2.in_enc_pages, + con->v2.in_enc_page_cnt); + con->v2.in_enc_pages = NULL; + con->v2.in_enc_page_cnt = 0; + } if (con->v2.out_enc_pages) { WARN_ON(!con->v2.out_enc_page_cnt); ceph_release_page_vector(con->v2.out_enc_pages, -- cgit v1.2.3-71-gd317 From 038b8d1d1ab1cce11a158d30bf080ff41a2cfd15 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 30 Dec 2021 15:13:32 +0100 Subject: libceph: optionally use bounce buffer on recv path in crc mode Both msgr1 and msgr2 in crc mode are zero copy in the sense that message data is read from the socket directly into the destination buffer. We assume that the destination buffer is stable (i.e. remains unchanged while it is being read to) though. Otherwise, CRC errors ensue: libceph: read_partial_message 0000000048edf8ad data crc 1063286393 != exp. 228122706 libceph: osd1 (1)192.168.122.1:6843 bad crc/signature libceph: bad data crc, calculated 57958023, expected 1805382778 libceph: osd2 (2)192.168.122.1:6876 integrity error, bad crc Introduce rxbounce option to enable use of a bounce buffer when receiving message data. In particular this is needed if a mapped image is a Windows VM disk, passed to QEMU. Windows has a system-wide "dummy" page that may be mapped into the destination buffer (potentially more than once into the same buffer) by the Windows Memory Manager in an effort to generate a single large I/O [1][2]. QEMU makes a point of preserving overlap relationships when cloning I/O vectors, so krbd gets exposed to this behaviour. [1] "What Is Really in That MDL?" 
https://docs.microsoft.com/en-us/previous-versions/windows/hardware/design/dn614012(v=vs.85) [2] https://blogs.msmvps.com/kernelmustard/2005/05/04/dummy-pages/ URL: https://bugzilla.redhat.com/show_bug.cgi?id=1973317 Signed-off-by: Ilya Dryomov Reviewed-by: Jeff Layton --- include/linux/ceph/libceph.h | 1 + include/linux/ceph/messenger.h | 1 + net/ceph/ceph_common.c | 7 +++++ net/ceph/messenger.c | 4 +++ net/ceph/messenger_v1.c | 54 ++++++++++++++++++++++++++++++++++----- net/ceph/messenger_v2.c | 58 ++++++++++++++++++++++++++++++++---------- 6 files changed, 105 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 6a89ea410e43..edf62eaa6285 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -35,6 +35,7 @@ #define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */ #define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */ #define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */ +#define CEPH_OPT_RXBOUNCE (1<<7) /* double-buffer read data */ #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 6c6b6ea52bb8..e7f2fb2fc207 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -461,6 +461,7 @@ struct ceph_connection { struct ceph_msg *out_msg; /* sending message (== tail of out_sent) */ + struct page *bounce_page; u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */ diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index ecc400a0b7bb..4c6441536d55 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -246,6 +246,7 @@ enum { Opt_cephx_sign_messages, Opt_tcp_nodelay, Opt_abort_on_full, + Opt_rxbounce, }; enum { @@ -295,6 +296,7 @@ static const struct fs_parameter_spec ceph_parameters[] = { fsparam_u32 ("osdkeepalive", Opt_osdkeepalivetimeout), fsparam_enum ("read_from_replica", Opt_read_from_replica, ceph_param_read_from_replica), + fsparam_flag ("rxbounce", Opt_rxbounce), fsparam_enum ("ms_mode", Opt_ms_mode, ceph_param_ms_mode), fsparam_string ("secret", Opt_secret), @@ -584,6 +586,9 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt, case Opt_abort_on_full: opt->flags |= CEPH_OPT_ABORT_ON_FULL; break; + case Opt_rxbounce: + opt->flags |= CEPH_OPT_RXBOUNCE; + break; default: BUG(); @@ -660,6 +665,8 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, seq_puts(m, "notcp_nodelay,"); if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL)) seq_puts(m, "abort_on_full,"); + if (opt->flags & CEPH_OPT_RXBOUNCE) + seq_puts(m, "rxbounce,"); if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) seq_printf(m, "mount_timeout=%d,", diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 45eba2dcb67a..d3bb656308b4 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -515,6 +515,10 @@ static void ceph_con_reset_protocol(struct ceph_connection *con) ceph_msg_put(con->out_msg); con->out_msg = NULL; } + if (con->bounce_page) { + __free_page(con->bounce_page); + con->bounce_page = NULL; + } if (ceph_msgr2(from_msgr(con->msgr))) ceph_con_v2_reset_protocol(con); diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c index 2cb5ffdf071a..6b014eca3a13 100644 --- a/net/ceph/messenger_v1.c +++ b/net/ceph/messenger_v1.c @@ -992,8 +992,7 @@ static int read_partial_message_section(struct 
ceph_connection *con, static int read_partial_msg_data(struct ceph_connection *con) { - struct ceph_msg *msg = con->in_msg; - struct ceph_msg_data_cursor *cursor = &msg->cursor; + struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor; bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); struct page *page; size_t page_offset; @@ -1001,9 +1000,6 @@ static int read_partial_msg_data(struct ceph_connection *con) u32 crc = 0; int ret; - if (!msg->num_data_items) - return -EIO; - if (do_datacrc) crc = con->in_data_crc; while (cursor->total_resid) { @@ -1031,6 +1027,46 @@ static int read_partial_msg_data(struct ceph_connection *con) return 1; /* must return > 0 to indicate success */ } +static int read_partial_msg_data_bounce(struct ceph_connection *con) +{ + struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor; + struct page *page; + size_t off, len; + u32 crc; + int ret; + + if (unlikely(!con->bounce_page)) { + con->bounce_page = alloc_page(GFP_NOIO); + if (!con->bounce_page) { + pr_err("failed to allocate bounce page\n"); + return -ENOMEM; + } + } + + crc = con->in_data_crc; + while (cursor->total_resid) { + if (!cursor->resid) { + ceph_msg_data_advance(cursor, 0); + continue; + } + + page = ceph_msg_data_next(cursor, &off, &len, NULL); + ret = ceph_tcp_recvpage(con->sock, con->bounce_page, 0, len); + if (ret <= 0) { + con->in_data_crc = crc; + return ret; + } + + crc = crc32c(crc, page_address(con->bounce_page), ret); + memcpy_to_page(page, off, page_address(con->bounce_page), ret); + + ceph_msg_data_advance(cursor, ret); + } + con->in_data_crc = crc; + + return 1; /* must return > 0 to indicate success */ +} + /* * read (part of) a message. */ @@ -1141,7 +1177,13 @@ static int read_partial_message(struct ceph_connection *con) /* (page) data */ if (data_len) { - ret = read_partial_msg_data(con); + if (!m->num_data_items) + return -EIO; + + if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) + ret = read_partial_msg_data_bounce(con); + else + ret = read_partial_msg_data(con); if (ret <= 0) return ret; } diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c index 2ea00489e691..c81379f93ad5 100644 --- a/net/ceph/messenger_v2.c +++ b/net/ceph/messenger_v2.c @@ -1753,7 +1753,7 @@ static int prepare_read_control_remainder(struct ceph_connection *con) return 0; } -static void prepare_read_data(struct ceph_connection *con) +static int prepare_read_data(struct ceph_connection *con) { struct bio_vec bv; @@ -1762,23 +1762,55 @@ static void prepare_read_data(struct ceph_connection *con) data_len(con->in_msg)); get_bvec_at(&con->v2.in_cursor, &bv); - set_in_bvec(con, &bv); + if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) { + if (unlikely(!con->bounce_page)) { + con->bounce_page = alloc_page(GFP_NOIO); + if (!con->bounce_page) { + pr_err("failed to allocate bounce page\n"); + return -ENOMEM; + } + } + + bv.bv_page = con->bounce_page; + bv.bv_offset = 0; + set_in_bvec(con, &bv); + } else { + set_in_bvec(con, &bv); + } con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT; + return 0; } static void prepare_read_data_cont(struct ceph_connection *con) { struct bio_vec bv; - con->in_data_crc = ceph_crc32c_page(con->in_data_crc, - con->v2.in_bvec.bv_page, - con->v2.in_bvec.bv_offset, - con->v2.in_bvec.bv_len); + if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) { + con->in_data_crc = crc32c(con->in_data_crc, + page_address(con->bounce_page), + con->v2.in_bvec.bv_len); + + get_bvec_at(&con->v2.in_cursor, &bv); + memcpy_to_page(bv.bv_page, bv.bv_offset, + page_address(con->bounce_page), + 
con->v2.in_bvec.bv_len); + } else { + con->in_data_crc = ceph_crc32c_page(con->in_data_crc, + con->v2.in_bvec.bv_page, + con->v2.in_bvec.bv_offset, + con->v2.in_bvec.bv_len); + } ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len); if (con->v2.in_cursor.total_resid) { get_bvec_at(&con->v2.in_cursor, &bv); - set_in_bvec(con, &bv); + if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) { + bv.bv_page = con->bounce_page; + bv.bv_offset = 0; + set_in_bvec(con, &bv); + } else { + set_in_bvec(con, &bv); + } WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT); return; } @@ -1791,14 +1823,13 @@ static void prepare_read_data_cont(struct ceph_connection *con) con->v2.in_state = IN_S_HANDLE_EPILOGUE; } -static void prepare_read_tail_plain(struct ceph_connection *con) +static int prepare_read_tail_plain(struct ceph_connection *con) { struct ceph_msg *msg = con->in_msg; if (!front_len(msg) && !middle_len(msg)) { WARN_ON(!data_len(msg)); - prepare_read_data(con); - return; + return prepare_read_data(con); } reset_in_kvecs(con); @@ -1817,6 +1848,7 @@ static void prepare_read_tail_plain(struct ceph_connection *con) add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN); con->v2.in_state = IN_S_HANDLE_EPILOGUE; } + return 0; } static void prepare_read_enc_page(struct ceph_connection *con) @@ -2699,8 +2731,7 @@ static int __handle_control(struct ceph_connection *con, void *p) if (con_secure(con)) return prepare_read_tail_secure(con); - prepare_read_tail_plain(con); - return 0; + return prepare_read_tail_plain(con); } static int handle_preamble(struct ceph_connection *con) @@ -2856,8 +2887,7 @@ static int populate_in_iter(struct ceph_connection *con) ret = handle_control_remainder(con); break; case IN_S_PREPARE_READ_DATA: - prepare_read_data(con); - ret = 0; + ret = prepare_read_data(con); break; case IN_S_PREPARE_READ_DATA_CONT: prepare_read_data_cont(con); -- cgit v1.2.3-71-gd317 From 043cfff99a18933fda2fb2e163daee73cc07910b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 23 Dec 2021 17:23:00 +0100 Subject: firmware: ti_sci: Fix compilation failure when CONFIG_TI_SCI_PROTOCOL is not defined Remove an extra ";" which breaks compilation. 
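For illustration, a minimal standalone sketch of the failure mode (hypothetical function name, not the ti_sci code itself): the stray ";" turns the static inline stub into a bare declaration followed by a free-standing brace block at file scope, which does not compile.

    /* broken: the ";" ends the declaration, so the body below it is
     * a stray block at file scope and the build fails */
    static inline int example_get_resource(int dev_id);
    {
            return -EINVAL;
    }

    /* fixed: no ";" between the parameter list and the body */
    static inline int example_get_resource(int dev_id)
    {
            return -EINVAL;
    }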
Fixes: 53bf2b0e4e4c ("firmware: ti_sci: Add support for getting resource with subtype") Signed-off-by: Christophe JAILLET Signed-off-by: Nishanth Menon Link: https://lore.kernel.org/r/e6c3cb793e1a6a2a0ae2528d5a5650dfe6a4b6ff.1640276505.git.christophe.jaillet@wanadoo.fr --- include/linux/soc/ti/ti_sci_protocol.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 0aad7009b50e..bd0d11af76c5 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -645,7 +645,7 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, static inline struct ti_sci_resource * devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, - u32 dev_id, u32 sub_type); + u32 dev_id, u32 sub_type) { return ERR_PTR(-EINVAL); } -- cgit v1.2.3-71-gd317 From bfb1a7c91fb7758273b4a8d735313d9cc388b502 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Wed, 2 Feb 2022 12:55:53 -0800 Subject: x86/bug: Merge annotate_reachable() into _BUG_FLAGS() asm In __WARN_FLAGS(), we had two asm statements (abbreviated): asm volatile("ud2"); asm volatile(".pushsection .discard.reachable"); This pair of statements is used to trigger an exception, but then help objtool understand that for warnings, control flow will be restored immediately afterwards. The problem is that volatile is not a compiler barrier. GCC explicitly documents this: > Note that the compiler can move even volatile asm instructions > relative to other code, including across jump instructions. Also, no clobbers are specified to prevent the compiler from scheduling instructions from subsequent statements before the second asm statement, so such instructions can end up emitted between the two. Providing a scheduling model, such as via -march= options, enables the compiler to better schedule instructions with known latencies to hide latencies from data hazards, which it cannot do for inline asm statements whose latencies are not estimated. If an instruction gets scheduled by the compiler between the two asm statements, then objtool will think that it is not reachable, producing a warning. To prevent instructions from being scheduled in between the two asm statements, merge them. Also remove an unnecessary unreachable() asm annotation from BUG() in favor of __builtin_unreachable(). objtool is able to track that the ud2 from BUG() terminates control flow within the function. 
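As a simplified sketch (not the kernel's actual macros; the label and section name follow the ASM_REACHABLE definition below), the difference is whether the reachable annotation is guaranteed to sit directly after the trapping instruction:

    /* two separate volatile asm statements: the compiler may schedule
     * instructions from later statements between them, so the
     * .discard.reachable entry may not point just past the ud2 */
    asm volatile("ud2");
    asm volatile("999:\n\t"
                 ".pushsection .discard.reachable\n\t"
                 ".long 999b - .\n\t"
                 ".popsection");

    /* merged into a single asm statement: nothing can be scheduled
     * in between, so the annotation always lands right after the ud2 */
    asm volatile("ud2\n\t"
                 "998:\n\t"
                 ".pushsection .discard.reachable\n\t"
                 ".long 998b - .\n\t"
                 ".popsection");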
Link: https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html#Volatile Link: https://github.com/ClangBuiltLinux/linux/issues/1483 Signed-off-by: Nick Desaulniers Signed-off-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20220202205557.2260694-1-ndesaulniers@google.com --- arch/x86/include/asm/bug.h | 20 +++++++++++--------- include/linux/compiler.h | 21 +++++---------------- 2 files changed, 16 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 84b87538a15d..bab883c0b6fe 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -22,7 +22,7 @@ #ifdef CONFIG_DEBUG_BUGVERBOSE -#define _BUG_FLAGS(ins, flags) \ +#define _BUG_FLAGS(ins, flags, extra) \ do { \ asm_inline volatile("1:\t" ins "\n" \ ".pushsection __bug_table,\"aw\"\n" \ @@ -31,7 +31,8 @@ do { \ "\t.word %c1" "\t# bug_entry::line\n" \ "\t.word %c2" "\t# bug_entry::flags\n" \ "\t.org 2b+%c3\n" \ - ".popsection" \ + ".popsection\n" \ + extra \ : : "i" (__FILE__), "i" (__LINE__), \ "i" (flags), \ "i" (sizeof(struct bug_entry))); \ @@ -39,14 +40,15 @@ do { \ #else /* !CONFIG_DEBUG_BUGVERBOSE */ -#define _BUG_FLAGS(ins, flags) \ +#define _BUG_FLAGS(ins, flags, extra) \ do { \ asm_inline volatile("1:\t" ins "\n" \ ".pushsection __bug_table,\"aw\"\n" \ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ "\t.word %c0" "\t# bug_entry::flags\n" \ "\t.org 2b+%c1\n" \ - ".popsection" \ + ".popsection\n" \ + extra \ : : "i" (flags), \ "i" (sizeof(struct bug_entry))); \ } while (0) @@ -55,7 +57,7 @@ do { \ #else -#define _BUG_FLAGS(ins, flags) asm volatile(ins) +#define _BUG_FLAGS(ins, flags, extra) asm volatile(ins) #endif /* CONFIG_GENERIC_BUG */ @@ -63,8 +65,8 @@ do { \ #define BUG() \ do { \ instrumentation_begin(); \ - _BUG_FLAGS(ASM_UD2, 0); \ - unreachable(); \ + _BUG_FLAGS(ASM_UD2, 0, ""); \ + __builtin_unreachable(); \ } while (0) /* @@ -75,9 +77,9 @@ do { \ */ #define __WARN_FLAGS(flags) \ do { \ + __auto_type f = BUGFLAG_WARNING|(flags); \ instrumentation_begin(); \ - _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \ - annotate_reachable(); \ + _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE); \ instrumentation_end(); \ } while (0) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 429dcebe2b99..0f7fd205ab7e 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -117,14 +117,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, */ #define __stringify_label(n) #n -#define __annotate_reachable(c) ({ \ - asm volatile(__stringify_label(c) ":\n\t" \ - ".pushsection .discard.reachable\n\t" \ - ".long " __stringify_label(c) "b - .\n\t" \ - ".popsection\n\t" : : "i" (c)); \ -}) -#define annotate_reachable() __annotate_reachable(__COUNTER__) - #define __annotate_unreachable(c) ({ \ asm volatile(__stringify_label(c) ":\n\t" \ ".pushsection .discard.unreachable\n\t" \ @@ -133,24 +125,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, }) #define annotate_unreachable() __annotate_unreachable(__COUNTER__) -#define ASM_UNREACHABLE \ - "999:\n\t" \ - ".pushsection .discard.unreachable\n\t" \ - ".long 999b - .\n\t" \ +#define ASM_REACHABLE \ + "998:\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long 998b - .\n\t" \ ".popsection\n\t" /* Annotate a C jump table to allow objtool to follow the code flow */ #define __annotate_jump_table __section(".rodata..c_jump_table") #else -#define annotate_reachable() #define annotate_unreachable() +# define ASM_REACHABLE #define __annotate_jump_table 
#endif -#ifndef ASM_UNREACHABLE -# define ASM_UNREACHABLE -#endif #ifndef unreachable # define unreachable() do { \ annotate_unreachable(); \ -- cgit v1.2.3-71-gd317 From e85c81ba8859a4c839bcd69c5d83b32954133a5b Mon Sep 17 00:00:00 2001 From: Xin Yin Date: Mon, 17 Jan 2022 17:36:54 +0800 Subject: ext4: fast commit may not fallback for ineligible commit For the following scenario: 1. jbd2 starts committing transaction n 2. task A gets a new handle for transaction n+1 3. task A performs some ineligible actions and marks FC_INELIGIBLE 4. jbd2 completes transaction n and clears FC_INELIGIBLE 5. task A calls fsync In this case the fast commit will not fall back to a full commit, and transaction n+1 is also never handled by jbd2. Make ext4_fc_mark_ineligible() also record the transaction tid of the latest ineligible case. When ext4_fc_cleanup() is called, check the current transaction tid; if it is smaller than the latest ineligible tid, do not clear EXT4_MF_FC_INELIGIBLE. Reported-by: kernel test robot Reported-by: Dan Carpenter Reported-by: Ritesh Harjani Suggested-by: Harshad Shirwadkar Signed-off-by: Xin Yin Link: https://lore.kernel.org/r/20220117093655.35160-2-yinxin.x@bytedance.com Signed-off-by: Theodore Ts'o Cc: stable@kernel.org --- fs/ext4/ext4.h | 3 ++- fs/ext4/extents.c | 4 ++-- fs/ext4/fast_commit.c | 33 +++++++++++++++++++++++++-------- fs/ext4/inode.c | 4 ++-- fs/ext4/ioctl.c | 4 ++-- fs/ext4/namei.c | 4 ++-- fs/ext4/super.c | 1 + fs/ext4/xattr.c | 6 +++--- fs/jbd2/commit.c | 2 +- fs/jbd2/journal.c | 2 +- include/linux/jbd2.h | 2 +- 11 files changed, 42 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 598ecf07652a..d52295becda3 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1749,6 +1749,7 @@ struct ext4_sb_info { spinlock_t s_fc_lock; struct buffer_head *s_fc_bh; struct ext4_fc_stats s_fc_stats; + tid_t s_fc_ineligible_tid; #ifdef CONFIG_EXT4_DEBUG int s_fc_debug_max_replay; #endif @@ -2925,7 +2926,7 @@ void __ext4_fc_track_create(handle_t *handle, struct inode *inode, struct dentry *dentry); void ext4_fc_track_create(handle_t *handle, struct dentry *dentry); void ext4_fc_track_inode(handle_t *handle, struct inode *inode); -void ext4_fc_mark_ineligible(struct super_block *sb, int reason); +void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle); void ext4_fc_start_update(struct inode *inode); void ext4_fc_stop_update(struct inode *inode); void ext4_fc_del(struct inode *inode); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 5dd13108d4b7..3ce4fc250b0d 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -5336,7 +5336,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) ret = PTR_ERR(handle); goto out_mmap; } - ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE); + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle); down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode, 0); @@ -5476,7 +5476,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) ret = PTR_ERR(handle); goto out_mmap; } - ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE); + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle); /* Expand file to avoid data loss if there is error while shifting */ inode->i_size += len; diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 1abe78b8d84f..e031afee42de 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -300,18 +300,32 @@ restart: } /* - * Mark file system as fast commit ineligible. 
This means that next commit - * operation would result in a full jbd2 commit. + * Mark file system as fast commit ineligible, and record latest + * ineligible transaction tid. This means until the recorded + * transaction, commit operation would result in a full jbd2 commit. */ -void ext4_fc_mark_ineligible(struct super_block *sb, int reason) +void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle) { struct ext4_sb_info *sbi = EXT4_SB(sb); + tid_t tid; if (!test_opt2(sb, JOURNAL_FAST_COMMIT) || (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)) return; ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); + if (handle && !IS_ERR(handle)) + tid = handle->h_transaction->t_tid; + else { + read_lock(&sbi->s_journal->j_state_lock); + tid = sbi->s_journal->j_running_transaction ? + sbi->s_journal->j_running_transaction->t_tid : 0; + read_unlock(&sbi->s_journal->j_state_lock); + } + spin_lock(&sbi->s_fc_lock); + if (sbi->s_fc_ineligible_tid < tid) + sbi->s_fc_ineligible_tid = tid; + spin_unlock(&sbi->s_fc_lock); WARN_ON(reason >= EXT4_FC_REASON_MAX); sbi->s_fc_stats.fc_ineligible_reason_count[reason]++; } @@ -387,7 +401,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update) mutex_unlock(&ei->i_fc_lock); node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS); if (!node) { - ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM); + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL); mutex_lock(&ei->i_fc_lock); return -ENOMEM; } @@ -400,7 +414,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update) if (!node->fcd_name.name) { kmem_cache_free(ext4_fc_dentry_cachep, node); ext4_fc_mark_ineligible(inode->i_sb, - EXT4_FC_REASON_NOMEM); + EXT4_FC_REASON_NOMEM, NULL); mutex_lock(&ei->i_fc_lock); return -ENOMEM; } @@ -502,7 +516,7 @@ void ext4_fc_track_inode(handle_t *handle, struct inode *inode) if (ext4_should_journal_data(inode)) { ext4_fc_mark_ineligible(inode->i_sb, - EXT4_FC_REASON_INODE_JOURNAL_DATA); + EXT4_FC_REASON_INODE_JOURNAL_DATA, handle); return; } @@ -1179,7 +1193,7 @@ fallback: * Fast commit cleanup routine. This is called after every fast commit and * full commit. full is true if we are called after a full commit. */ -static void ext4_fc_cleanup(journal_t *journal, int full) +static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid) { struct super_block *sb = journal->j_private; struct ext4_sb_info *sbi = EXT4_SB(sb); @@ -1227,7 +1241,10 @@ static void ext4_fc_cleanup(journal_t *journal, int full) &sbi->s_fc_q[FC_Q_MAIN]); ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING); - ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); + if (tid >= sbi->s_fc_ineligible_tid) { + sbi->s_fc_ineligible_tid = 0; + ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); + } if (full) sbi->s_fc_bytes = 0; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9dbeb772de60..f368dd5fd8d5 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -337,7 +337,7 @@ stop_handle: return; no_delete: if (!list_empty(&EXT4_I(inode)->i_fc_list)) - ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM); + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL); ext4_clear_inode(inode); /* We must guarantee clearing of inode... 
*/ } @@ -5976,7 +5976,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) return PTR_ERR(handle); ext4_fc_mark_ineligible(inode->i_sb, - EXT4_FC_REASON_JOURNAL_FLAG_CHANGE); + EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle); err = ext4_mark_inode_dirty(handle, inode); ext4_handle_sync(handle); ext4_journal_stop(handle); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index bbbedf27b71c..a8022c2c6a58 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -411,7 +411,7 @@ static long swap_inode_boot_loader(struct super_block *sb, err = -EINVAL; goto err_out; } - ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT); + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT, handle); /* Protect extent tree against block allocations via delalloc */ ext4_double_down_write_data_sem(inode, inode_bl); @@ -1373,7 +1373,7 @@ mext_out: err = ext4_resize_fs(sb, n_blocks_count); if (EXT4_SB(sb)->s_journal) { - ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE); + ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE, NULL); jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0); jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 52c9bd154122..47b9f87dbc6f 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -3889,7 +3889,7 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir, * dirents in directories. */ ext4_fc_mark_ineligible(old.inode->i_sb, - EXT4_FC_REASON_RENAME_DIR); + EXT4_FC_REASON_RENAME_DIR, handle); } else { if (new.inode) ext4_fc_track_unlink(handle, new.dentry); @@ -4049,7 +4049,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, if (unlikely(retval)) goto end_rename; ext4_fc_mark_ineligible(new.inode->i_sb, - EXT4_FC_REASON_CROSS_RENAME); + EXT4_FC_REASON_CROSS_RENAME, handle); if (old.dir_bh) { retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino); if (retval) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 9a936ecbaa3b..6930b7737ce4 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5084,6 +5084,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) sbi->s_fc_bytes = 0; ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING); + sbi->s_fc_ineligible_tid = 0; spin_lock_init(&sbi->s_fc_lock); memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats)); sbi->s_fc_replay_state.fc_regions = NULL; diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 1e0fc1ed845b..042325349098 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2408,7 +2408,7 @@ retry_inode: if (IS_SYNC(inode)) ext4_handle_sync(handle); } - ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR); + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle); cleanup: brelse(is.iloc.bh); @@ -2486,7 +2486,7 @@ retry: if (error == 0) error = error2; } - ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR); + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, NULL); return error; } @@ -2920,7 +2920,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode, error); goto cleanup; } - ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR); + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle); } error = 0; cleanup: diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 3cc4ab2ba7f4..d188fa913a07 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -1170,7 +1170,7 @@ restart_loop: if (journal->j_commit_callback) 
journal->j_commit_callback(journal, commit_transaction); if (journal->j_fc_cleanup_callback) - journal->j_fc_cleanup_callback(journal, 1); + journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid); trace_jbd2_end_commit(journal, commit_transaction); jbd_debug(1, "JBD2: commit %d complete, head %d\n", diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 0b86a4365b66..a8e64ad11ae3 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -771,7 +771,7 @@ static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback) { jbd2_journal_unlock_updates(journal); if (journal->j_fc_cleanup_callback) - journal->j_fc_cleanup_callback(journal, 0); + journal->j_fc_cleanup_callback(journal, 0, tid); write_lock(&journal->j_state_lock); journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING; if (fallback) diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index fd933c45281a..d63b8106796e 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1295,7 +1295,7 @@ struct journal_s * Clean-up after fast commit or full commit. JBD2 calls this function * after every commit operation. */ - void (*j_fc_cleanup_callback)(struct journal_s *journal, int); + void (*j_fc_cleanup_callback)(struct journal_s *journal, int full, tid_t tid); /** * @j_fc_replay_callback: -- cgit v1.2.3-71-gd317 From 3ca40c0d329113a9f76f6aa01abe73d9f16ace9d Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Mon, 17 Jan 2022 17:41:50 +0530 Subject: jbd2: cleanup unused functions declarations from jbd2.h During code review, no references were found to a few of the function declarations below. This patch cleans those up from jbd2.h. Signed-off-by: Ritesh Harjani Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/30d1fc327becda197a4136cf9cdc73d9baa3b7b9.1642416995.git.riteshh@linux.ibm.com Signed-off-by: Theodore Ts'o --- include/linux/jbd2.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index d63b8106796e..afc5572e7b8a 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1419,9 +1419,7 @@ extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *); extern bool __jbd2_journal_refile_buffer(struct journal_head *); extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *); extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); -extern void __journal_free_buffer(struct journal_head *bh); extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); -extern void __journal_clean_data_list(transaction_t *transaction); static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh) { list_add_tail(&bh->b_assoc_buffers, head); @@ -1486,9 +1484,6 @@ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction, struct buffer_head **bh_out, sector_t blocknr); -/* Transaction locking */ -extern void __wait_on_journal (journal_t *); - /* Transaction cache support */ extern void jbd2_journal_destroy_transaction_cache(void); extern int __init jbd2_journal_init_transaction_cache(void); @@ -1774,8 +1769,6 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal) #define BJ_Reserved 4 /* Buffer is reserved for access by journal */ #define BJ_Types 5 -extern int jbd_blocks_per_page(struct inode *inode); - /* JBD uses a CRC32 checksum */ #define JBD_MAX_CHECKSUM_SIZE 4 -- cgit v1.2.3-71-gd317 From 4f98186848707f530669238d90e0562d92a78aab Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Mon, 17 Jan 2022 17:41:51 
+0530 Subject: jbd2: refactor wait logic for transaction updates into a common function No functionality change as such in this patch. This only refactors the common piece of code which waits for t_updates to finish into a common function named as jbd2_journal_wait_updates(journal_t *) Signed-off-by: Ritesh Harjani Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/8c564f70f4b2591171677a2a74fccb22a7b6c3a4.1642416995.git.riteshh@linux.ibm.com Signed-off-by: Theodore Ts'o --- fs/jbd2/commit.c | 19 +++--------------- fs/jbd2/transaction.c | 53 +++++++++++++++++++++++++++++++-------------------- include/linux/jbd2.h | 4 +++- 3 files changed, 38 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index d188fa913a07..5b9408e3b370 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -484,22 +484,9 @@ void jbd2_journal_commit_transaction(journal_t *journal) stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start, stats.run.rs_locked); - spin_lock(&commit_transaction->t_handle_lock); - while (atomic_read(&commit_transaction->t_updates)) { - DEFINE_WAIT(wait); + // waits for any t_updates to finish + jbd2_journal_wait_updates(journal); - prepare_to_wait(&journal->j_wait_updates, &wait, - TASK_UNINTERRUPTIBLE); - if (atomic_read(&commit_transaction->t_updates)) { - spin_unlock(&commit_transaction->t_handle_lock); - write_unlock(&journal->j_state_lock); - schedule(); - write_lock(&journal->j_state_lock); - spin_lock(&commit_transaction->t_handle_lock); - } - finish_wait(&journal->j_wait_updates, &wait); - } - spin_unlock(&commit_transaction->t_handle_lock); commit_transaction->t_state = T_SWITCH; write_unlock(&journal->j_state_lock); @@ -817,7 +804,7 @@ start_journal_io: commit_transaction->t_state = T_COMMIT_DFLUSH; write_unlock(&journal->j_state_lock); - /* + /* * If the journal is not located on the file system device, * then we must flush the file system device before we issue * the commit record diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 6a3caedd2285..8e2f8275a253 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -449,7 +449,7 @@ repeat: } /* OK, account for the buffers that this operation expects to - * use and add the handle to the running transaction. + * use and add the handle to the running transaction. */ update_t_max_wait(transaction, ts); handle->h_transaction = transaction; @@ -836,6 +836,35 @@ int jbd2_journal_restart(handle_t *handle, int nblocks) } EXPORT_SYMBOL(jbd2_journal_restart); +/* + * Waits for any outstanding t_updates to finish. + * This is called with write j_state_lock held. + */ +void jbd2_journal_wait_updates(journal_t *journal) +{ + transaction_t *commit_transaction = journal->j_running_transaction; + + if (!commit_transaction) + return; + + spin_lock(&commit_transaction->t_handle_lock); + while (atomic_read(&commit_transaction->t_updates)) { + DEFINE_WAIT(wait); + + prepare_to_wait(&journal->j_wait_updates, &wait, + TASK_UNINTERRUPTIBLE); + if (atomic_read(&commit_transaction->t_updates)) { + spin_unlock(&commit_transaction->t_handle_lock); + write_unlock(&journal->j_state_lock); + schedule(); + write_lock(&journal->j_state_lock); + spin_lock(&commit_transaction->t_handle_lock); + } + finish_wait(&journal->j_wait_updates, &wait); + } + spin_unlock(&commit_transaction->t_handle_lock); +} + /** * jbd2_journal_lock_updates () - establish a transaction barrier. * @journal: Journal to establish a barrier on. 
@@ -863,27 +892,9 @@ void jbd2_journal_lock_updates(journal_t *journal) write_lock(&journal->j_state_lock); } - /* Wait until there are no running updates */ - while (1) { - transaction_t *transaction = journal->j_running_transaction; - - if (!transaction) - break; + /* Wait until there are no running t_updates */ + jbd2_journal_wait_updates(journal); - spin_lock(&transaction->t_handle_lock); - prepare_to_wait(&journal->j_wait_updates, &wait, - TASK_UNINTERRUPTIBLE); - if (!atomic_read(&transaction->t_updates)) { - spin_unlock(&transaction->t_handle_lock); - finish_wait(&journal->j_wait_updates, &wait); - break; - } - spin_unlock(&transaction->t_handle_lock); - write_unlock(&journal->j_state_lock); - schedule(); - finish_wait(&journal->j_wait_updates, &wait); - write_lock(&journal->j_state_lock); - } write_unlock(&journal->j_state_lock); /* diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index afc5572e7b8a..9c3ada74ffb1 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -594,7 +594,7 @@ struct transaction_s */ unsigned long t_log_start; - /* + /* * Number of buffers on the t_buffers list [j_list_lock, no locks * needed for jbd2 thread] */ @@ -1538,6 +1538,8 @@ extern int jbd2_journal_flush(journal_t *journal, unsigned int flags); extern void jbd2_journal_lock_updates (journal_t *); extern void jbd2_journal_unlock_updates (journal_t *); +void jbd2_journal_wait_updates(journal_t *); + extern journal_t * jbd2_journal_init_dev(struct block_device *bdev, struct block_device *fs_dev, unsigned long long start, int len, int bsize); -- cgit v1.2.3-71-gd317 From 67d6212afda218d564890d1674bab28e8612170f Mon Sep 17 00:00:00 2001 From: Igor Pylypiv Date: Thu, 27 Jan 2022 15:39:53 -0800 Subject: Revert "module, async: async_synchronize_full() on module init iff async is used" This reverts commit 774a1221e862b343388347bac9b318767336b20b. We need to finish all async code before the module init sequence is done. In the reverted commit the PF_USED_ASYNC flag was added to mark a thread that called async_schedule(). Then the PF_USED_ASYNC flag was used to determine whether or not async_synchronize_full() needs to be invoked. This works when modprobe thread is calling async_schedule(), but it does not work if module dispatches init code to a worker thread which then calls async_schedule(). For example, PCI driver probing is invoked from a worker thread based on a node where device is attached: if (cpu < nr_cpu_ids) error = work_on_cpu(cpu, local_pci_probe, &ddi); else error = local_pci_probe(&ddi); We end up in a situation where a worker thread gets the PF_USED_ASYNC flag set instead of the modprobe thread. As a result, async_synchronize_full() is not invoked and modprobe completes without waiting for the async code to finish. The issue was discovered while loading the pm80xx driver: (scsi_mod.scan=async) modprobe pm80xx worker ... do_init_module() ... pci_call_probe() work_on_cpu(local_pci_probe) local_pci_probe() pm8001_pci_probe() scsi_scan_host() async_schedule() worker->flags |= PF_USED_ASYNC; ... < return from worker > ... if (current->flags & PF_USED_ASYNC) <--- false async_synchronize_full(); Commit 21c3c5d28007 ("block: don't request module during elevator init") fixed the deadlock issue which the reverted commit 774a1221e862 ("module, async: async_synchronize_full() on module init iff async is used") tried to fix. Since commit 0fdff3ec6d87 ("async, kmod: warn on synchronous request_module() from async workers") synchronous module loading from async is not allowed. 
Given that the original deadlock issue is fixed and it is no longer allowed to call synchronous request_module() from async we can remove PF_USED_ASYNC flag to make module init consistently invoke async_synchronize_full() unless async module probe is requested. Signed-off-by: Igor Pylypiv Reviewed-by: Changyuan Lyu Reviewed-by: Luis Chamberlain Acked-by: Tejun Heo Signed-off-by: Linus Torvalds --- include/linux/sched.h | 1 - kernel/async.c | 3 --- kernel/module.c | 25 +++++-------------------- 3 files changed, 5 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index f5b2be39a78c..75ba8aa60248 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1680,7 +1680,6 @@ extern struct pid *cad_pid; #define PF_MEMALLOC 0x00000800 /* Allocating memory */ #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ -#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ #define PF_FROZEN 0x00010000 /* Frozen for system suspend */ #define PF_KSWAPD 0x00020000 /* I am kswapd */ diff --git a/kernel/async.c b/kernel/async.c index b8d7a663497f..b2c4ba5686ee 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data, atomic_inc(&entry_count); spin_unlock_irqrestore(&async_lock, flags); - /* mark that this task has queued an async job, used by module init */ - current->flags |= PF_USED_ASYNC; - /* schedule for execution */ queue_work_node(node, system_unbound_wq, &entry->work); diff --git a/kernel/module.c b/kernel/module.c index 24dab046e16c..46a5c2ed1928 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3725,12 +3725,6 @@ static noinline int do_init_module(struct module *mod) } freeinit->module_init = mod->init_layout.base; - /* - * We want to find out whether @mod uses async during init. Clear - * PF_USED_ASYNC. async_schedule*() will set it. - */ - current->flags &= ~PF_USED_ASYNC; - do_mod_ctors(mod); /* Start the module */ if (mod->init != NULL) @@ -3756,22 +3750,13 @@ static noinline int do_init_module(struct module *mod) /* * We need to finish all async code before the module init sequence - * is done. This has potential to deadlock. For example, a newly - * detected block device can trigger request_module() of the - * default iosched from async probing task. Once userland helper - * reaches here, async_synchronize_full() will wait on the async - * task waiting on request_module() and deadlock. - * - * This deadlock is avoided by perfomring async_synchronize_full() - * iff module init queued any async jobs. This isn't a full - * solution as it will deadlock the same if module loading from - * async jobs nests more than once; however, due to the various - * constraints, this hack seems to be the best option for now. - * Please refer to the following thread for details. + * is done. This has potential to deadlock if synchronous module + * loading is requested from async (which is not allowed!). * - * http://thread.gmane.org/gmane.linux.kernel/1420814 + * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous + * request_module() from async workers") for more details. 
*/ - if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC)) + if (!mod->async_probe_requested) async_synchronize_full(); ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base + -- cgit v1.2.3-71-gd317 From 22f56b8e890d4e2835951b437bb6eeebfd1cb18b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 3 Feb 2022 16:01:39 -0500 Subject: XArray: Include bitmap.h from xarray.h xas_find_chunk() calls find_next_bit(), which is defined in find.h, included from bitmap.h. Inside the kernel, this isn't a problem because bitmap.h is included from cpumask.h which is dragged in (eventually) by gfp.h. When building the test-suite, that doesn't happen, so we need to include bitmap.h explicitly. Fixes: 4ade0818cf04 ("tools: sync tools/bitmap with mother linux") Reported-by: Liam Howlett Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/xarray.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index d6d5da6ed735..66e28bc1a023 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -9,6 +9,7 @@ * See Documentation/core-api/xarray.rst for how to use the XArray. */ +#include <linux/bitmap.h> #include <linux/bug.h> #include <linux/compiler.h> #include <linux/gfp.h> -- cgit v1.2.3-71-gd317 From 00edb2bac29f2e9c1674e85479fa5e3aa8cc648f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jan 2022 10:55:44 -0800 Subject: i40e: remove enum i40e_client_state It's not used. Signed-off-by: Jakub Kicinski Reviewed-by: Jesse Brandeburg Tested-by: Gurucharan G Signed-off-by: Tony Nguyen --- include/linux/net/intel/i40e_client.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h index 6b3267b49755..ed42bd5f639f 100644 --- a/include/linux/net/intel/i40e_client.h +++ b/include/linux/net/intel/i40e_client.h @@ -26,11 +26,6 @@ struct i40e_client_version { u8 rsvd; }; -enum i40e_client_state { - __I40E_CLIENT_NULL, - __I40E_CLIENT_REGISTERED -}; enum i40e_client_instance_state { __I40E_CLIENT_INSTANCE_NONE, __I40E_CLIENT_INSTANCE_OPENED, @@ -190,11 +185,6 @@ struct i40e_client { const struct i40e_client_ops *ops; /* client ops provided by the client */ }; -static inline bool i40e_client_is_registered(struct i40e_client *client) -{ - return test_bit(__I40E_CLIENT_REGISTERED, &client->state); -} - void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client); void i40e_client_device_unregister(struct i40e_info *ldev); -- cgit v1.2.3-71-gd317 From 3a99f121fe0bfa4b65ff74d9e980018caf54c2d4 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:01 -0800 Subject: firmware: qcom: scm: Introduce pas_metadata context Starting with Qualcomm SM8450, some new security enhancements have been made in the secure world, which results in the requirement to keep the metadata segment accessible by the secure world from init_image() until auth_and_reset(). Introduce a "PAS metadata context" object that can be passed to init_image() for tracking the mapped memory, and a related release function with which client drivers release the mapping, either once auth_and_reset() has been invoked or in error handling paths on the way there. 
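A hypothetical caller sketch of the intended lifetime (function and variable names are illustrative; qcom_scm_pas_auth_and_reset() is the existing SCM call): the metadata mapping created by init_image() is kept until auth_and_reset() has run, and released on every path.

    static int example_pas_boot(u32 pas_id, const void *metadata,
                                size_t size)
    {
            struct qcom_scm_pas_metadata ctx = {};
            int ret;

            ret = qcom_scm_pas_init_image(pas_id, metadata, size, &ctx);
            if (ret)
                    return ret;

            /* ... set up memory and load the firmware segments ... */

            ret = qcom_scm_pas_auth_and_reset(pas_id);
            qcom_scm_pas_metadata_release(&ctx);
            return ret;
    }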
Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-2-bjorn.andersson@linaro.org --- drivers/firmware/qcom_scm.c | 39 ++++++++++++++++++++++++++++++++++----- drivers/soc/qcom/mdt_loader.c | 2 +- include/linux/qcom_scm.h | 10 +++++++++- 3 files changed, 44 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 927738882e54..00f8a50b9f6a 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -432,10 +432,16 @@ static void qcom_scm_set_download_mode(bool enable) * and optional blob of data used for authenticating the metadata * and the rest of the firmware * @size: size of the metadata + * @ctx: optional metadata context * - * Returns 0 on success. + * Return: 0 on success. + * + * Upon successful return, the PAS metadata context (@ctx) will be used to + * track the metadata allocation, this needs to be released by invoking + * qcom_scm_pas_metadata_release() by the caller. */ -int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) +int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, + struct qcom_scm_pas_metadata *ctx) { dma_addr_t mdata_phys; void *mdata_buf; @@ -464,7 +470,7 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) ret = qcom_scm_clk_enable(); if (ret) - goto free_metadata; + goto out; desc.args[1] = mdata_phys; @@ -472,13 +478,36 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) qcom_scm_clk_disable(); -free_metadata: - dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); +out: + if (ret < 0 || !ctx) { + dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); + } else if (ctx) { + ctx->ptr = mdata_buf; + ctx->phys = mdata_phys; + ctx->size = size; + } return ret ? 
: res.result[0]; } EXPORT_SYMBOL(qcom_scm_pas_init_image); +/** + * qcom_scm_pas_metadata_release() - release metadata context + * @ctx: metadata context + */ +void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx) +{ + if (!ctx->ptr) + return; + + dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys); + + ctx->ptr = NULL; + ctx->phys = 0; + ctx->size = 0; +} +EXPORT_SYMBOL(qcom_scm_pas_metadata_release); + /** * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral * for firmware loading diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 72fc2b539213..b00586db5391 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -171,7 +171,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, goto out; } - ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len); + ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, NULL); kfree(metadata); if (ret) { diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index ca4a88d7cbdc..681748619890 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -68,8 +68,16 @@ extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); extern void qcom_scm_cpu_power_down(u32 flags); extern int qcom_scm_set_remote_state(u32 state, u32 id); +struct qcom_scm_pas_metadata { + void *ptr; + dma_addr_t phys; + ssize_t size; +}; + extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, - size_t size); + size_t size, + struct qcom_scm_pas_metadata *ctx); +void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx); extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size); extern int qcom_scm_pas_auth_and_reset(u32 peripheral); -- cgit v1.2.3-71-gd317 From 8bd42e2341a7857010001f08ee1729ced3b0e394 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:03 -0800 Subject: soc: qcom: mdt_loader: Allow hash segment to be split out It's been observed that some firmware found in a Qualcomm SM8450 device has the hash table in a separate .bNN file. Use the newly extracted helper function to load this segment from the separate file, if it's determined that the hashes are not part of the already loaded firmware. In order to do this, the function needs access to the firmware basename and to provide more useful error messages a struct device to associate the errors with. 
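A hedged usage sketch of the extended helper (the firmware name and device variable are illustrative): the extra fw_name/dev arguments let the helper request a split-out hash segment (a separate .bNN file) and attribute errors to the right device.

    size_t metadata_len;
    void *metadata;

    metadata = qcom_mdt_read_metadata(fw, &metadata_len,
                                      "modem.mdt", dev);
    if (IS_ERR(metadata))
            return PTR_ERR(metadata);

    /* ... hand the buffer to qcom_scm_pas_init_image() ... */
    kfree(metadata);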
Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-4-bjorn.andersson@linaro.org --- drivers/remoteproc/qcom_q6v5_mss.c | 7 ++++--- drivers/soc/qcom/mdt_loader.c | 29 +++++++++++++++++++++-------- include/linux/soc/qcom/mdt_loader.h | 6 ++++-- 3 files changed, 29 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 43ea8455546c..a2c231a17b2b 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -928,7 +928,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc, regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); } -static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) +static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw, + const char *fw_name) { unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; dma_addr_t phys; @@ -939,7 +940,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) void *ptr; int ret; - metadata = qcom_mdt_read_metadata(fw, &size); + metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev); if (IS_ERR(metadata)) return PTR_ERR(metadata); @@ -1289,7 +1290,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc) /* Initialize the RMB validator */ writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); - ret = q6v5_mpss_init_image(qproc, fw); + ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image); if (ret) goto release_firmware; diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index c9e5bdfac371..4372d8e38b29 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -121,13 +121,15 @@ EXPORT_SYMBOL_GPL(qcom_mdt_get_size); * * Return: pointer to data, or ERR_PTR() */ -void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len) +void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, + const char *fw_name, struct device *dev) { const struct elf32_phdr *phdrs; const struct elf32_hdr *ehdr; size_t hash_offset; size_t hash_size; size_t ehdr_size; + ssize_t ret; void *data; ehdr = (struct elf32_hdr *)fw->data; @@ -149,14 +151,25 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len) if (!data) return ERR_PTR(-ENOMEM); - /* Is the header and hash already packed */ - if (ehdr_size + hash_size == fw->size) + /* Copy ELF header */ + memcpy(data, fw->data, ehdr_size); + + if (ehdr_size + hash_size == fw->size) { + /* Firmware is split and hash is packed following the ELF header */ hash_offset = phdrs[0].p_filesz; - else + memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); + } else if (phdrs[1].p_offset + hash_size <= fw->size) { + /* Hash is in its own segment, but within the loaded file */ hash_offset = phdrs[1].p_offset; - - memcpy(data, fw->data, ehdr_size); - memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); + memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); + } else { + /* Hash is in its own segment, beyond the loaded file */ + ret = mdt_load_split_segment(data + ehdr_size, phdrs, 1, fw_name, dev); + if (ret) { + kfree(data); + return ERR_PTR(ret); + } + } *data_len = ehdr_size + hash_size; @@ -190,7 +203,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, phdrs = (struct elf32_phdr *)(ehdr + 1); if (pas_init) { - metadata = qcom_mdt_read_metadata(fw, &metadata_len); + metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, 
dev); if (IS_ERR(metadata)) { ret = PTR_ERR(metadata); dev_err(dev, "error %d reading firmware %s metadata\n", diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index afd47217996b..46bdb7bace9a 100644 --- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -23,7 +23,8 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base); -void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len); +void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, + const char *fw_name, struct device *dev); #else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */ @@ -51,7 +52,8 @@ static inline int qcom_mdt_load_no_init(struct device *dev, } static inline void *qcom_mdt_read_metadata(const struct firmware *fw, - size_t *data_len) + size_t *data_len, const char *fw_name, + struct device *dev) { return ERR_PTR(-ENODEV); } -- cgit v1.2.3-71-gd317 From b794eecb2af77145d36b9c28f056e242add9c4b2 Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Tue, 23 Nov 2021 10:25:36 -0800 Subject: ice: add support for DSCP QoS for IDC The ice driver provides QoS information to auxiliary drivers through the exported function ice_get_qos_params. This function doesn't currently support L3 DSCP QoS. Add the necessary defines, structure elements and code to support DSCP QoS through the IIDC functions. Signed-off-by: Dave Ertman Signed-off-by: Shiraz Saleem Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_idc.c | 5 +++++ include/linux/net/intel/iidc.h | 4 ++++ 2 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index fc3580167e7b..263a2e7577a2 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -227,6 +227,11 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; + + qos->pfc_mode = dcbx_cfg->pfc_mode; + if (qos->pfc_mode == IIDC_DSCP_PFC_MODE) + for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++) + qos->dscp_map[i] = dcbx_cfg->dscp_map[i]; } EXPORT_SYMBOL_GPL(ice_get_qos_params); diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h index 1289593411d3..1c1332e4df26 100644 --- a/include/linux/net/intel/iidc.h +++ b/include/linux/net/intel/iidc.h @@ -32,6 +32,8 @@ enum iidc_rdma_protocol { }; #define IIDC_MAX_USER_PRIORITY 8 +#define IIDC_MAX_DSCP_MAPPING 64 +#define IIDC_DSCP_PFC_MODE 0x1 /* Struct to hold per RDMA Qset info */ struct iidc_rdma_qset_params { @@ -60,6 +62,8 @@ struct iidc_qos_params { u8 vport_relative_bw; u8 vport_priority_type; u8 num_tc; + u8 pfc_mode; + u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; }; struct iidc_event { -- cgit v1.2.3-71-gd317 From f4e526ff7e38e27bb87d53131d227a6fd6f73ab5 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 18:55:08 -0800 Subject: soc: qcom: mdt_loader: Extract PAS operations Rather than passing a boolean to indicate if the PAS operations should be performed from within __mdt_load(), extract them to their own helper function. This will allow clients to invoke this directly, with some qcom_scm_pas_metadata context that they later needs to release, without further having to complicate the prototype of qcom_mdt_load(). 
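A hypothetical client sketch of the split flow this enables (prototypes as in the header change below; error handling abbreviated): run the PAS operations explicitly, keep the metadata context across segment loading, and release it afterwards.

    struct qcom_scm_pas_metadata pas_ctx = {};
    int ret;

    ret = qcom_mdt_pas_init(dev, fw, fw_name, pas_id, mem_phys, &pas_ctx);
    if (ret)
            return ret;

    ret = qcom_mdt_load_no_init(dev, fw, fw_name, pas_id, mem_region,
                                mem_phys, mem_size, &reloc_base);
    if (!ret)
            ret = qcom_scm_pas_auth_and_reset(pas_id);

    qcom_scm_pas_metadata_release(&pas_ctx);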
Signed-off-by: Bjorn Andersson Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20220128025513.97188-9-bjorn.andersson@linaro.org --- drivers/soc/qcom/mdt_loader.c | 110 ++++++++++++++++++++++++------------ include/linux/soc/qcom/mdt_loader.h | 11 ++++ 2 files changed, 85 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index c8d43dc50cff..f0b1d969567c 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -188,6 +188,74 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, } EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata); +/** + * qcom_mdt_pas_init() - initialize PAS region for firmware loading + * @dev: device handle to associate resources with + * @fw: firmware object for the mdt file + * @firmware: name of the firmware, for construction of segment file names + * @pas_id: PAS identifier + * @mem_phys: physical address of allocated memory region + * @ctx: PAS metadata context, to be released by caller + * + * Returns 0 on success, negative errno otherwise. + */ +int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, phys_addr_t mem_phys, + struct qcom_scm_pas_metadata *ctx) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + phys_addr_t min_addr = PHYS_ADDR_MAX; + phys_addr_t max_addr = 0; + size_t metadata_len; + void *metadata; + int ret; + int i; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!mdt_phdr_valid(phdr)) + continue; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + + if (phdr->p_paddr + phdr->p_memsz > max_addr) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + + metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev); + if (IS_ERR(metadata)) { + ret = PTR_ERR(metadata); + dev_err(dev, "error %d reading firmware %s metadata\n", ret, fw_name); + goto out; + } + + ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, ctx); + kfree(metadata); + if (ret) { + /* Invalid firmware metadata */ + dev_err(dev, "error %d initializing firmware %s\n", ret, fw_name); + goto out; + } + + ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr); + if (ret) { + /* Unable to set up relocation */ + dev_err(dev, "error %d setting up firmware %s\n", ret, fw_name); + goto out; + } + +out: + return ret; +} +EXPORT_SYMBOL_GPL(qcom_mdt_pas_init); + static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, @@ -198,10 +266,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, const struct elf32_hdr *ehdr; phys_addr_t mem_reloc; phys_addr_t min_addr = PHYS_ADDR_MAX; - phys_addr_t max_addr = 0; - size_t metadata_len; ssize_t offset; - void *metadata; bool relocate = false; void *ptr; int ret = 0; @@ -224,37 +289,6 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, if (phdr->p_paddr < min_addr) min_addr = phdr->p_paddr; - - if (phdr->p_paddr + phdr->p_memsz > max_addr) - max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); - } - - if (pas_init) { - metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev); - if (IS_ERR(metadata)) { - ret = PTR_ERR(metadata); - dev_err(dev, "error %d reading firmware %s metadata\n", - ret, fw_name); - 
goto out; - } - - ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, NULL); - - kfree(metadata); - if (ret) { - /* Invalid firmware metadata */ - dev_err(dev, "error %d initializing firmware %s\n", - ret, fw_name); - goto out; - } - - ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr); - if (ret) { - /* Unable to set up relocation */ - dev_err(dev, "error %d setting up firmware %s\n", - ret, fw_name); - goto out; - } } if (relocate) { @@ -319,8 +353,6 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, if (reloc_base) *reloc_base = mem_reloc; -out: - return ret; } @@ -342,6 +374,12 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base) { + int ret; + + ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL); + if (ret) + return ret; + return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys, mem_size, reloc_base, true); } diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index 46bdb7bace9a..9e8e60421192 100644 --- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -10,10 +10,14 @@ struct device; struct firmware; +struct qcom_scm_pas_metadata; #if IS_ENABLED(CONFIG_QCOM_MDT_LOADER) ssize_t qcom_mdt_get_size(const struct firmware *fw); +int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, phys_addr_t mem_phys, + struct qcom_scm_pas_metadata *pas_metadata_ctx); int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, @@ -33,6 +37,13 @@ static inline ssize_t qcom_mdt_get_size(const struct firmware *fw) return -ENODEV; } +static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, phys_addr_t mem_phys, + struct qcom_scm_pas_metadata *pas_metadata_ctx) +{ + return -ENODEV; +} + static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, -- cgit v1.2.3-71-gd317 From 52beb1fc237d67cdc64277dc90047767a6fc52d7 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Wed, 1 Dec 2021 14:05:04 +0100 Subject: firmware: qcom: scm: Drop cpumask parameter from set_boot_addr() qcom_scm_set_cold/warm_boot_addr() currently take a cpumask parameter, but it's not very useful because at the end we always set the same entry address for all CPUs. This also allows speeding up probe of cpuidle-qcom-spm a bit because only one SCM call needs to be made to the TrustZone firmware, instead of one per CPU. The main reason for this change is that it allows implementing the "multi-cluster" variant of the set_boot_addr() call more easily without having to rely on functions that break in certain build configurations or that are not exported to modules. 
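As a rough sketch of the simplified calling convention after this change: secondary_startup_arm is the arch entry point already used by platsmp.c, while the wrapper function below is hypothetical:

#include <linux/qcom_scm.h>

/* Arch-provided secondary CPU entry point (see platsmp.c). */
extern void secondary_startup_arm(void);

static int example_set_boot_addr(void)
{
	/* One SCM call now covers all present CPUs; no cpumask needed. */
	return qcom_scm_set_cold_boot_addr(secondary_startup_arm);
}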
Signed-off-by: Stephan Gerhold Acked-by: Daniel Lezcano Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20211201130505.257379-4-stephan@gerhold.net --- arch/arm/mach-qcom/platsmp.c | 3 +-- drivers/cpuidle/cpuidle-qcom-spm.c | 8 ++++---- drivers/firmware/qcom_scm.c | 19 ++++++++----------- include/linux/qcom_scm.h | 4 ++-- 4 files changed, 15 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c index 58a4228455ce..65a0d5ce2bb3 100644 --- a/arch/arm/mach-qcom/platsmp.c +++ b/arch/arm/mach-qcom/platsmp.c @@ -357,8 +357,7 @@ static void __init qcom_smp_prepare_cpus(unsigned int max_cpus) { int cpu; - if (qcom_scm_set_cold_boot_addr(secondary_startup_arm, - cpu_present_mask)) { + if (qcom_scm_set_cold_boot_addr(secondary_startup_arm)) { for_each_present_cpu(cpu) { if (cpu == smp_processor_id()) continue; diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c index 5f27dcc6c110..beedf22cbe78 100644 --- a/drivers/cpuidle/cpuidle-qcom-spm.c +++ b/drivers/cpuidle/cpuidle-qcom-spm.c @@ -122,10 +122,6 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu) if (ret <= 0) return ret ? : -ENODEV; - ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpumask_of(cpu)); - if (ret) - return ret; - return cpuidle_register(&data->cpuidle_driver, NULL); } @@ -136,6 +132,10 @@ static int spm_cpuidle_drv_probe(struct platform_device *pdev) if (!qcom_scm_is_available()) return -EPROBE_DEFER; + ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm); + if (ret) + return dev_err_probe(&pdev->dev, ret, "set warm boot addr failed"); + for_each_possible_cpu(cpu) { ret = spm_cpuidle_register(&pdev->dev, cpu); if (ret && ret != -ENODEV) { diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 1bcc139c9165..0382f9fa4501 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -243,8 +243,7 @@ static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, return ret ? false : !!res.result[0]; } -static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, - const u8 *cpu_bits) +static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits) { int cpu; unsigned int flags = 0; @@ -255,7 +254,7 @@ static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, .owner = ARM_SMCCC_OWNER_SIP, }; - for_each_cpu(cpu, cpus) { + for_each_present_cpu(cpu) { if (cpu >= QCOM_SCM_BOOT_MAX_CPUS) return -EINVAL; flags |= cpu_bits[cpu]; @@ -268,27 +267,25 @@ static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, } /** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus + * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point * * Set the Linux entry point for the SCM to transfer control to when coming * out of a power down. CPU power down may be executed on cpuidle or hotplug. 
*/ -int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +int qcom_scm_set_warm_boot_addr(void *entry) { - return qcom_scm_set_boot_addr(entry, cpus, qcom_scm_cpu_warm_bits); + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); } EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); /** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus + * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point */ -int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +int qcom_scm_set_cold_boot_addr(void *entry) { - return qcom_scm_set_boot_addr(entry, cpus, qcom_scm_cpu_cold_bits); + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); } EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 681748619890..f8335644a01a 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -63,8 +63,8 @@ enum qcom_scm_ice_cipher { extern bool qcom_scm_is_available(void); -extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); -extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); +extern int qcom_scm_set_cold_boot_addr(void *entry); +extern int qcom_scm_set_warm_boot_addr(void *entry); extern void qcom_scm_cpu_power_down(u32 flags); extern int qcom_scm_set_remote_state(u32 state, u32 id); -- cgit v1.2.3-71-gd317 From 2651bf680bc2ad9a078b7222b0873145ab4ece07 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 3 Feb 2022 11:28:25 -0800 Subject: block: introduce BLK_STS_OFFLINE Currently, drivers report BLK_STS_IOERR for devices that are not fully online or are being removed. This behavior could cause confusion for users, as these are not really I/O errors from the device. Solve this issue with a new state BLK_STS_OFFLINE, which reports "device offline error" in dmesg instead of "I/O error". EIO is intentionally kept to not change the user-visible return value. Signed-off-by: Song Liu Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20220203192827.1370270-2-song@kernel.org Signed-off-by: Jens Axboe --- block/blk-core.c | 1 + include/linux/blk_types.h | 7 +++++++ 2 files changed, 8 insertions(+) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index 61f6a0dc4511..24035dd2eef1 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -164,6 +164,7 @@ static const struct { [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" }, [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" }, [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" }, + [BLK_STS_OFFLINE] = { -EIO, "device offline" }, /* device mapper special case, should not leak out: */ [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" }, diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index fe065c394fff..5561e58d158a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -153,6 +153,13 @@ typedef u8 __bitwise blk_status_t; */ #define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16) +/* + * BLK_STS_OFFLINE is returned from the driver when the target device is offline + * or is being taken offline. This could help differentiate the case where a + * device is intentionally being shut down from a real I/O error.
+ */ +#define BLK_STS_OFFLINE ((__force blk_status_t)17) + /** * blk_path_error - returns true if error may be path related * @error: status the request was completed with -- cgit v1.2.3-71-gd317 From 1bc91a5ddf3eaea0e0ea957cccf3abdcfcace00e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 20 Jan 2022 13:07:01 +0100 Subject: netfilter: conntrack: handle ->destroy hook via nat_ops instead The nat module already exposes a few functions to the conntrack core. Move the nat extension destroy hook to it. After this, no conntrack extension needs a destroy hook. 'struct nf_ct_ext_type' and the register/unregister api can be removed in a followup patch. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 1 + include/net/netfilter/nf_conntrack_extend.h | 3 --- net/netfilter/nf_conntrack_core.c | 14 ++++++++++++-- net/netfilter/nf_conntrack_extend.c | 21 --------------------- net/netfilter/nf_nat_core.c | 13 +++---------- 5 files changed, 16 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 15e71bfff726..c2c6f332fb90 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -379,6 +379,7 @@ struct nf_nat_hook { unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct, enum nf_nat_manip_type mtype, enum ip_conntrack_dir dir); + void (*remove_nat_bysrc)(struct nf_conn *ct); }; extern const struct nf_nat_hook __rcu *nf_nat_hook; diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index 87d818414092..343f9194423a 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -79,9 +79,6 @@ void nf_ct_ext_destroy(struct nf_conn *ct); void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp); struct nf_ct_ext_type { - /* Destroys relationships (can be NULL). */ - void (*destroy)(struct nf_conn *ct); - enum nf_ct_ext_id id; }; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 9edd3ae8d62e..8f0c0c0fd329 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -594,7 +594,7 @@ EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc); void nf_ct_tmpl_free(struct nf_conn *tmpl) { - nf_ct_ext_destroy(tmpl); + kfree(tmpl->ext); if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) kfree((char *)tmpl - tmpl->proto.tmpl_padto); @@ -1597,7 +1597,17 @@ void nf_conntrack_free(struct nf_conn *ct) */ WARN_ON(refcount_read(&ct->ct_general.use) != 0); - nf_ct_ext_destroy(ct); + if (ct->status & IPS_SRC_NAT_DONE) { + const struct nf_nat_hook *nat_hook; + + rcu_read_lock(); + nat_hook = rcu_dereference(nf_nat_hook); + if (nat_hook) + nat_hook->remove_nat_bysrc(ct); + rcu_read_unlock(); + } + + kfree(ct->ext); kmem_cache_free(nf_conntrack_cachep, ct); cnet = nf_ct_pernet(net); diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 69a6cafcb045..6b772b804ee2 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -89,27 +89,6 @@ static __always_inline unsigned int total_extension_size(void) ; } -void nf_ct_ext_destroy(struct nf_conn *ct) -{ - unsigned int i; - struct nf_ct_ext_type *t; - - for (i = 0; i < NF_CT_EXT_NUM; i++) { - rcu_read_lock(); - t = rcu_dereference(nf_ct_ext_types[i]); - - /* Here the nf_ct_ext_type might have been unregisterd. 
- * I.e., it has responsible to cleanup private - * area in all conntracks when it is unregisterd. - */ - if (t && t->destroy) - t->destroy(ct); - rcu_read_unlock(); - } - - kfree(ct->ext); -} - void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) { unsigned int newlen, newoff, oldlen, alloc; diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 2ff20d6a5afb..8cc31d695e36 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -838,7 +838,7 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data) return i->status & IPS_NAT_MASK ? 1 : 0; } -static void __nf_nat_cleanup_conntrack(struct nf_conn *ct) +static void nf_nat_cleanup_conntrack(struct nf_conn *ct) { unsigned int h; @@ -860,7 +860,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data) * will delete entry from already-freed table. */ if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status)) - __nf_nat_cleanup_conntrack(ct); + nf_nat_cleanup_conntrack(ct); /* don't delete conntrack. Although that would make things a lot * simpler, we'd end up flushing all conntracks on nat rmmod. @@ -868,15 +868,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data) return 0; } -/* No one using conntrack by the time this called. */ -static void nf_nat_cleanup_conntrack(struct nf_conn *ct) -{ - if (ct->status & IPS_SRC_NAT_DONE) - __nf_nat_cleanup_conntrack(ct); -} - static struct nf_ct_ext_type nat_extend __read_mostly = { - .destroy = nf_nat_cleanup_conntrack, .id = NF_CT_EXT_NAT, }; @@ -1171,6 +1163,7 @@ static const struct nf_nat_hook nat_hook = { .decode_session = __nf_nat_decode_session, #endif .manip_pkt = nf_nat_manip_pkt, + .remove_nat_bysrc = nf_nat_cleanup_conntrack, }; static int __init nf_nat_init(void) -- cgit v1.2.3-71-gd317 From 20ff32024624102596f2b4083a17a97ca71d6cd8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 20 Jan 2022 16:09:13 +0100 Subject: netfilter: conntrack: pptp: use single option structure Instead of exposing the four hooks individually, use a single hook ops structure.
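To make the consolidation concrete, here is a self-contained sketch of the single-ops-structure idiom this patch adopts; every example_* name is invented, and only the RCU publish/consume pattern mirrors the code in the diff below:

#include <linux/rcupdate.h>
#include <linux/skbuff.h>

struct example_hook {
	int (*inbound)(struct sk_buff *skb);
	int (*outbound)(struct sk_buff *skb);
};

static const struct example_hook __rcu *example_hook_ptr;

static int example_inbound(struct sk_buff *skb) { return 0; }
static int example_outbound(struct sk_buff *skb) { return 0; }

static const struct example_hook example_hooks = {
	.inbound = example_inbound,
	.outbound = example_outbound,
};

/* Provider: publish or withdraw the whole callback set at once. */
static void example_register(void)
{
	RCU_INIT_POINTER(example_hook_ptr, &example_hooks);
}

static void example_unregister(void)
{
	RCU_INIT_POINTER(example_hook_ptr, NULL);
	synchronize_rcu();
}

/* Consumer: one rcu_dereference() yields a consistent set of hooks,
 * instead of four independently published function pointers. */
static int example_consume(struct sk_buff *skb, bool in)
{
	const struct example_hook *hook;
	int ret = 0;

	rcu_read_lock();
	hook = rcu_dereference(example_hook_ptr);
	if (hook)
		ret = in ? hook->inbound(skb) : hook->outbound(skb);
	rcu_read_unlock();
	return ret;
}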
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nf_conntrack_pptp.h | 38 ++++++++---------- net/ipv4/netfilter/nf_nat_pptp.c | 24 +++++------- net/netfilter/nf_conntrack_pptp.c | 60 +++++++++-------------------- 3 files changed, 45 insertions(+), 77 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index a28aa289afdc..c3bdb4370938 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -300,26 +300,22 @@ union pptp_ctrl_union { struct PptpSetLinkInfo setlink; }; -extern int -(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, - struct nf_conn *ct, enum ip_conntrack_info ctinfo, - unsigned int protoff, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq); - -extern int -(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb, - struct nf_conn *ct, enum ip_conntrack_info ctinfo, - unsigned int protoff, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq); - -extern void -(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig, - struct nf_conntrack_expect *exp_reply); - -extern void -(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, - struct nf_conntrack_expect *exp); +struct nf_nat_pptp_hook { + int (*outbound)(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + unsigned int protoff, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + int (*inbound)(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + unsigned int protoff, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + void (*exp_gre)(struct nf_conntrack_expect *exp_orig, + struct nf_conntrack_expect *exp_reply); + void (*expectfn)(struct nf_conn *ct, + struct nf_conntrack_expect *exp); +}; +extern const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook; #endif /* _NF_CONNTRACK_PPTP_H */ diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index 3f248a19faa3..fab357cc8559 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c @@ -295,28 +295,24 @@ pptp_inbound_pkt(struct sk_buff *skb, return NF_ACCEPT; } +static const struct nf_nat_pptp_hook pptp_hooks = { + .outbound = pptp_outbound_pkt, + .inbound = pptp_inbound_pkt, + .exp_gre = pptp_exp_gre, + .expectfn = pptp_nat_expected, +}; + static int __init nf_nat_helper_pptp_init(void) { - BUG_ON(nf_nat_pptp_hook_outbound != NULL); - RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); - - BUG_ON(nf_nat_pptp_hook_inbound != NULL); - RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); - - BUG_ON(nf_nat_pptp_hook_exp_gre != NULL); - RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); + WARN_ON(nf_nat_pptp_hook != NULL); + RCU_INIT_POINTER(nf_nat_pptp_hook, &pptp_hooks); - BUG_ON(nf_nat_pptp_hook_expectfn != NULL); - RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, pptp_nat_expected); return 0; } static void __exit nf_nat_helper_pptp_fini(void) { - RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, NULL); - RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, NULL); - RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, NULL); - RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, NULL); + RCU_INIT_POINTER(nf_nat_pptp_hook, NULL); synchronize_rcu(); } diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 7d5708b92138..f3fa367b455f 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c 
@@ -45,30 +45,8 @@ MODULE_ALIAS_NFCT_HELPER("pptp"); static DEFINE_SPINLOCK(nf_pptp_lock); -int -(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, - struct nf_conn *ct, enum ip_conntrack_info ctinfo, - unsigned int protoff, struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq) __read_mostly; -EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_outbound); - -int -(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb, - struct nf_conn *ct, enum ip_conntrack_info ctinfo, - unsigned int protoff, struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq) __read_mostly; -EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_inbound); - -void -(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *expect_orig, - struct nf_conntrack_expect *expect_reply) - __read_mostly; -EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_exp_gre); - -void -(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, - struct nf_conntrack_expect *exp) __read_mostly; -EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); +const struct nf_nat_pptp_hook *nf_nat_pptp_hook; +EXPORT_SYMBOL_GPL(nf_nat_pptp_hook); #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) /* PptpControlMessageType names */ @@ -111,8 +89,8 @@ EXPORT_SYMBOL(pptp_msg_name); static void pptp_expectfn(struct nf_conn *ct, struct nf_conntrack_expect *exp) { + const struct nf_nat_pptp_hook *hook; struct net *net = nf_ct_net(ct); - typeof(nf_nat_pptp_hook_expectfn) nf_nat_pptp_expectfn; pr_debug("increasing timeouts\n"); /* increase timeout of GRE data channel conntrack entry */ @@ -122,9 +100,9 @@ static void pptp_expectfn(struct nf_conn *ct, /* Can you see how rusty this code is, compared with the pre-2.6.11 * one? That's what happened to my shiny newnat of 2002 ;( -HW */ - nf_nat_pptp_expectfn = rcu_dereference(nf_nat_pptp_hook_expectfn); - if (nf_nat_pptp_expectfn && ct->master->status & IPS_NAT_MASK) - nf_nat_pptp_expectfn(ct, exp); + hook = rcu_dereference(nf_nat_pptp_hook); + if (hook && ct->master->status & IPS_NAT_MASK) + hook->expectfn(ct, exp); else { struct nf_conntrack_tuple inv_t; struct nf_conntrack_expect *exp_other; @@ -209,9 +187,9 @@ static void pptp_destroy_siblings(struct nf_conn *ct) static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid) { struct nf_conntrack_expect *exp_orig, *exp_reply; + const struct nf_nat_pptp_hook *hook; enum ip_conntrack_dir dir; int ret = 1; - typeof(nf_nat_pptp_hook_exp_gre) nf_nat_pptp_exp_gre; exp_orig = nf_ct_expect_alloc(ct); if (exp_orig == NULL) @@ -239,9 +217,9 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid) IPPROTO_GRE, &callid, &peer_callid); exp_reply->expectfn = pptp_expectfn; - nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre); - if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK) - nf_nat_pptp_exp_gre(exp_orig, exp_reply); + hook = rcu_dereference(nf_nat_pptp_hook); + if (hook && ct->status & IPS_NAT_MASK) + hook->exp_gre(exp_orig, exp_reply); if (nf_ct_expect_related(exp_orig, 0) != 0) goto out_put_both; if (nf_ct_expect_related(exp_reply, 0) != 0) @@ -279,9 +257,9 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, enum ip_conntrack_info ctinfo) { struct nf_ct_pptp_master *info = nfct_help_data(ct); + const struct nf_nat_pptp_hook *hook; u_int16_t msg; __be16 cid = 0, pcid = 0; - typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; msg = ntohs(ctlh->messageType); pr_debug("inbound control message %s\n", pptp_msg_name(msg)); @@ -383,10 +361,9 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, goto invalid; } - nf_nat_pptp_inbound = 
rcu_dereference(nf_nat_pptp_hook_inbound); - if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK) - return nf_nat_pptp_inbound(skb, ct, ctinfo, - protoff, ctlh, pptpReq); + hook = rcu_dereference(nf_nat_pptp_hook); + if (hook && ct->status & IPS_NAT_MASK) + return hook->inbound(skb, ct, ctinfo, protoff, ctlh, pptpReq); return NF_ACCEPT; invalid: @@ -407,9 +384,9 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, enum ip_conntrack_info ctinfo) { struct nf_ct_pptp_master *info = nfct_help_data(ct); + const struct nf_nat_pptp_hook *hook; u_int16_t msg; __be16 cid = 0, pcid = 0; - typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; msg = ntohs(ctlh->messageType); pr_debug("outbound control message %s\n", pptp_msg_name(msg)); @@ -479,10 +456,9 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, goto invalid; } - nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound); - if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK) - return nf_nat_pptp_outbound(skb, ct, ctinfo, - protoff, ctlh, pptpReq); + hook = rcu_dereference(nf_nat_pptp_hook); + if (hook && ct->status & IPS_NAT_MASK) + return hook->outbound(skb, ct, ctinfo, protoff, ctlh, pptpReq); return NF_ACCEPT; invalid: -- cgit v1.2.3-71-gd317 From ac9f0c810684a1b161c18eb4b91ce84cbc13c91d Mon Sep 17 00:00:00 2001 From: Anton Lundin Date: Thu, 3 Feb 2022 10:41:35 +0100 Subject: ata: libata-core: Introduce ATA_HORKAGE_NO_LOG_DIR horkage 06f6c4c6c3e8 ("ata: libata: add missing ata_identify_page_supported() calls") introduced additional calls to ata_identify_page_supported(), thus also adding indirect accesses to the device log directory log page through ata_log_supported(). Reading this log page causes SATADOM-ML 3ME devices to lock up. Introduce the horkage flag ATA_HORKAGE_NO_LOG_DIR to prevent accesses to the log directory in ata_log_supported() and add a blacklist entry with this flag for "SATADOM-ML 3ME" devices. Fixes: 636f6e2af4fb ("libata: add horkage for missing Identify Device log") Cc: stable@vger.kernel.org # v5.10+ Signed-off-by: Anton Lundin Signed-off-by: Damien Le Moal --- drivers/ata/libata-core.c | 10 ++++++++++ include/linux/libata.h | 1 + 2 files changed, 11 insertions(+) (limited to 'include/linux') diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 67f88027680a..e1b1dd215267 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2007,6 +2007,9 @@ static bool ata_log_supported(struct ata_device *dev, u8 log) { struct ata_port *ap = dev->link->ap; + if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR) + return false; + if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1)) return false; return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false; @@ -4073,6 +4076,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + /* + * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY + * log page is accessed. Ensure we never ask for this log page with + * these devices.
+ */ + { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR }, + /* End Marker */ { } }; diff --git a/include/linux/libata.h b/include/linux/libata.h index 605756f645be..7f99b4d78822 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -380,6 +380,7 @@ enum { ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */ ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */ + ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ -- cgit v1.2.3-71-gd317 From b93235e68921b9acd38ee309953a3a9808105289 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 2 Feb 2022 14:20:31 -0800 Subject: tls: cap the output scatter list to something reasonable TLS recvmsg() passes user pages as destination for decrypt. The decrypt operation is repeated record by record, each record being 16kB, max. TLS allocates an sg_table and uses iov_iter_get_pages() to populate it with enough pages to fit the decrypted record. Even though we decrypt a single message at a time we size the sg_table based on the entire length of the iovec. This leads to unnecessarily large allocations, risking triggering OOM conditions. Use iov_iter_truncate() / iov_iter_reexpand() to construct a "capped" version of iov_iter_npages(). Alternatively we could parametrize iov_iter_npages() to take the size as arg instead of using i->count, or do something else.. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/linux/uio.h | 17 +++++++++++++++++ net/tls/tls_sw.c | 3 ++- 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/uio.h b/include/linux/uio.h index 1198a2bfc9bf..739285fe5a2f 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -273,6 +273,23 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) i->count = count; } +static inline int +iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes) +{ + size_t shorted = 0; + int npages; + + if (iov_iter_count(i) > max_bytes) { + shorted = iov_iter_count(i) - max_bytes; + iov_iter_truncate(i, max_bytes); + } + npages = iov_iter_npages(i, INT_MAX); + if (shorted) + iov_iter_reexpand(i, iov_iter_count(i) + shorted); + + return npages; +} + struct csum_state { __wsum csum; size_t off; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index efc84845bb6b..0024a692f0f8 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1433,7 +1433,8 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, if (*zc && (out_iov || out_sg)) { if (out_iov) - n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1; + n_sgout = 1 + + iov_iter_npages_cap(out_iov, INT_MAX, data_len); else n_sgout = sg_nents(out_sg); n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size, -- cgit v1.2.3-71-gd317 From 56b4b5abcdab6daf71c5536fca2772f178590e06 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 2 Feb 2022 17:01:06 +0100 Subject: block: clone crypto and integrity data in __bio_clone_fast __bio_clone_fast should also clone integrity and crypto data, as a clone without those is incomplete. Right now the only caller that can actually support crypto and integrity data (dm) does it manually for the one callchain that supports these, but we better do it properly in the core. 
Note that all callers except for the above-mentioned one also don't need to handle failure at all, given that the integrity and crypto clones are based on mempool allocations that won't fail for sleeping allocations. Signed-off-by: Christoph Hellwig Reviewed-by: Mike Snitzer Link: https://lore.kernel.org/r/20220202160109.108149-11-hch@lst.de Signed-off-by: Jens Axboe --- block/bio-integrity.c | 1 - block/bio.c | 26 +++++++++++++------------- block/blk-crypto.c | 1 - drivers/md/bcache/request.c | 2 +- drivers/md/dm.c | 33 ++++++--------------------------- drivers/md/md-multipath.c | 2 +- include/linux/bio.h | 2 +- 7 files changed, 22 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/block/bio-integrity.c b/block/bio-integrity.c index d25114715459..bd5453220065 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -420,7 +420,6 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, return 0; } -EXPORT_SYMBOL(bio_integrity_clone); int bioset_integrity_create(struct bio_set *bs, int pool_size) { diff --git a/block/bio.c b/block/bio.c index d2f3c1035036..2a921875bb42 100644 --- a/block/bio.c +++ b/block/bio.c @@ -732,6 +732,7 @@ EXPORT_SYMBOL(bio_put); * __bio_clone_fast - clone a bio that shares the original bio's biovec * @bio: destination bio * @bio_src: bio to clone + * @gfp: allocation flags * * Clone a &bio. Caller will own the returned bio, but not * the actual data it points to. Reference count of returned @@ -739,7 +740,7 @@ EXPORT_SYMBOL(bio_put); * * Caller must ensure that @bio_src is not freed before @bio. */ -void __bio_clone_fast(struct bio *bio, struct bio *bio_src) +int __bio_clone_fast(struct bio *bio, struct bio *bio_src, gfp_t gfp) { WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs); @@ -761,6 +762,13 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) bio_clone_blkg_association(bio, bio_src); blkcg_bio_issue_init(bio); + + if (bio_crypt_clone(bio, bio_src, gfp) < 0) + return -ENOMEM; + if (bio_integrity(bio_src) && + bio_integrity_clone(bio, bio_src, gfp) < 0) + return -ENOMEM; + return 0; } EXPORT_SYMBOL(__bio_clone_fast); @@ -780,20 +788,12 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) if (!b) return NULL; - __bio_clone_fast(b, bio); - - if (bio_crypt_clone(b, bio, gfp_mask) < 0) - goto err_put; - - if (bio_integrity(bio) && - bio_integrity_clone(b, bio, gfp_mask) < 0) - goto err_put; + if (__bio_clone_fast(b, bio, gfp_mask) < 0) { + bio_put(b); + return NULL; + } return b; - -err_put: - bio_put(b); - return NULL; } EXPORT_SYMBOL(bio_clone_fast); diff --git a/block/blk-crypto.c b/block/blk-crypto.c index ec9efeeeca91..773dae4c329b 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -111,7 +111,6 @@ int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) *dst->bi_crypt_context = *src->bi_crypt_context; return 0; } -EXPORT_SYMBOL_GPL(__bio_crypt_clone); /* Increments @dun by @inc, treating @dun as a multi-limb integer. */ void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 7ba59d08ed87..574b02b94f1a 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -686,7 +686,7 @@ static void do_bio_hook(struct search *s, struct bio *bio = &s->bio.bio; bio_init(bio, NULL, NULL, 0, 0); - __bio_clone_fast(bio, orig_bio); + __bio_clone_fast(bio, orig_bio, GFP_NOIO); /* * bi_end_io can be set separately somewhere else, e.g.
the * variants in, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 78df75f57288..0f8796159379 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -561,7 +561,12 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, tio = clone_to_tio(clone); tio->inside_dm_io = false; } - __bio_clone_fast(&tio->clone, ci->bio); + + if (__bio_clone_fast(&tio->clone, ci->bio, gfp_mask) < 0) { + if (ci->io->tio.io) + bio_put(&tio->clone); + return NULL; + } tio->magic = DM_TIO_MAGIC; tio->io = ci->io; @@ -1196,31 +1201,8 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, sector_t sector, unsigned *len) { struct bio *bio = ci->bio, *clone; - int r; clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); - - r = bio_crypt_clone(clone, bio, GFP_NOIO); - if (r < 0) - goto free_tio; - - if (bio_integrity(bio)) { - struct dm_target_io *tio = clone_to_tio(clone); - - if (unlikely(!dm_target_has_integrity(tio->ti->type) && - !dm_target_passes_integrity(tio->ti->type))) { - DMWARN("%s: the target %s doesn't support integrity data.", - dm_device_name(tio->io->md), - tio->ti->type->name); - r = -EIO; - goto free_tio; - } - - r = bio_integrity_clone(clone, bio, GFP_NOIO); - if (r < 0) - goto free_tio; - } - bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); clone->bi_iter.bi_size = to_bytes(*len); @@ -1229,9 +1211,6 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, __map_bio(clone); return 0; -free_tio: - free_tio(clone); - return r; } static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index 5e15940634d8..010c759c741a 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -122,7 +122,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio) multipath = conf->multipaths + mp_bh->path; bio_init(&mp_bh->bio, NULL, NULL, 0, 0); - __bio_clone_fast(&mp_bh->bio, bio); + __bio_clone_fast(&mp_bh->bio, bio, GFP_NOIO); mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; bio_set_dev(&mp_bh->bio, multipath->rdev->bdev); diff --git a/include/linux/bio.h b/include/linux/bio.h index 18cfe5bb41ea..b814361c957b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -413,7 +413,7 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev, struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); extern void bio_put(struct bio *); -extern void __bio_clone_fast(struct bio *, struct bio *); +int __bio_clone_fast(struct bio *bio, struct bio *bio_src, gfp_t gfp); extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); extern struct bio_set fs_bio_set; -- cgit v1.2.3-71-gd317 From abfc426d1b2fb2176df59851a64223b58ddae7e7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 2 Feb 2022 17:01:09 +0100 Subject: block: pass a block_device to bio_clone_fast Pass a block_device to bio_clone_fast and __bio_clone_fast and give the functions more suitable names. 
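As a hedged sketch of the renamed helpers in use; the caller-owned bdev, bio_set and wrapper functions below are illustrative, while the two helper signatures come from the diff that follows:

#include <linux/bio.h>

static struct bio *example_clone_to_bdev(struct block_device *bdev,
					 struct bio *src, struct bio_set *bs)
{
	/* The clone is created against bdev directly, so no follow-up
	 * bio_set_dev() is needed; NULL indicates the allocation (or the
	 * crypto/integrity clone step) failed. */
	return bio_alloc_clone(bdev, src, GFP_NOIO, bs);
}

/* For a bio embedded in a driver structure, the in-place variant: */
static int example_init_embedded_clone(struct block_device *bdev,
				       struct bio *dst, struct bio *src)
{
	return bio_init_clone(bdev, dst, src, GFP_NOIO);
}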
Signed-off-by: Christoph Hellwig Reviewed-by: Mike Snitzer Link: https://lore.kernel.org/r/20220202160109.108149-14-hch@lst.de Signed-off-by: Jens Axboe --- Documentation/block/biodoc.rst | 5 ----- block/bio.c | 31 ++++++++++++++++++------------- block/blk-mq.c | 4 ++-- block/bounce.c | 3 +-- drivers/block/drbd/drbd_req.c | 4 ++-- drivers/block/drbd/drbd_worker.c | 4 ++-- drivers/block/pktcdvd.c | 4 ++-- drivers/md/bcache/request.c | 5 +++-- drivers/md/dm-cache-target.c | 4 ++-- drivers/md/dm-crypt.c | 11 +++++------ drivers/md/dm-zoned-target.c | 3 +-- drivers/md/dm.c | 6 +++--- drivers/md/md-faulty.c | 4 ++-- drivers/md/md-multipath.c | 3 +-- drivers/md/md.c | 5 +++-- drivers/md/raid1.c | 34 +++++++++++++++++----------------- drivers/md/raid10.c | 16 ++++++++-------- drivers/md/raid5.c | 4 ++-- fs/btrfs/extent_io.c | 4 ++-- include/linux/bio.h | 6 ++++-- 20 files changed, 80 insertions(+), 80 deletions(-) (limited to 'include/linux') diff --git a/Documentation/block/biodoc.rst b/Documentation/block/biodoc.rst index 2098477851a4..4fbc367e62f9 100644 --- a/Documentation/block/biodoc.rst +++ b/Documentation/block/biodoc.rst @@ -663,11 +663,6 @@ to i/o submission, if the bio fields are likely to be accessed after the i/o is issued (since the bio may otherwise get freed in case i/o completion happens in the meantime). -The bio_clone_fast() routine may be used to duplicate a bio, where the clone -shares the bio_vec_list with the original bio (i.e. both point to the -same bio_vec_list). This would typically be used for splitting i/o requests -in lvm or md. - 3.2 Generic bio helper Routines ------------------------------- diff --git a/block/bio.c b/block/bio.c index 74f66e22ef63..18d34b33351b 100644 --- a/block/bio.c +++ b/block/bio.c @@ -733,7 +733,8 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) bio_set_flag(bio, BIO_CLONED); if (bio_flagged(bio_src, BIO_THROTTLED)) bio_set_flag(bio, BIO_THROTTLED); - if (bio_flagged(bio_src, BIO_REMAPPED)) + if (bio->bi_bdev == bio_src->bi_bdev && + bio_flagged(bio_src, BIO_REMAPPED)) bio_set_flag(bio, BIO_REMAPPED); bio->bi_ioprio = bio_src->bi_ioprio; bio->bi_write_hint = bio_src->bi_write_hint; @@ -751,7 +752,8 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) } /** - * bio_clone_fast - clone a bio that shares the original bio's biovec + * bio_alloc_clone - clone a bio that shares the original bio's biovec + * @bdev: block_device to clone onto * @bio_src: bio to clone from * @gfp: allocation priority * @bs: bio_set to allocate from @@ -761,11 +763,12 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) * * The caller must ensure that the return bio is not freed before @bio_src. 
*/ -struct bio *bio_clone_fast(struct bio *bio_src, gfp_t gfp, struct bio_set *bs) +struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, + gfp_t gfp, struct bio_set *bs) { struct bio *bio; - bio = bio_alloc_bioset(bio_src->bi_bdev, 0, bio_src->bi_opf, gfp, bs); + bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); if (!bio) return NULL; @@ -777,10 +780,11 @@ struct bio *bio_clone_fast(struct bio *bio_src, gfp_t gfp, struct bio_set *bs) return bio; } -EXPORT_SYMBOL(bio_clone_fast); +EXPORT_SYMBOL(bio_alloc_clone); /** - * __bio_clone_fast - clone a bio that shares the original bio's biovec + * bio_init_clone - clone a bio that shares the original bio's biovec + * @bdev: block_device to clone onto * @bio: bio to clone into * @bio_src: bio to clone from * @gfp: allocation priority @@ -790,17 +794,18 @@ EXPORT_SYMBOL(bio_clone_fast); * * The caller must ensure that @bio_src is not freed before @bio. */ -int __bio_clone_fast(struct bio *bio, struct bio *bio_src, gfp_t gfp) +int bio_init_clone(struct block_device *bdev, struct bio *bio, + struct bio *bio_src, gfp_t gfp) { int ret; - bio_init(bio, bio_src->bi_bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); + bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); ret = __bio_clone(bio, bio_src, gfp); if (ret) bio_uninit(bio); return ret; } -EXPORT_SYMBOL(__bio_clone_fast); +EXPORT_SYMBOL(bio_init_clone); const char *bio_devname(struct bio *bio, char *buf) { @@ -1572,7 +1577,7 @@ struct bio *bio_split(struct bio *bio, int sectors, if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) return NULL; - split = bio_clone_fast(bio, gfp, bs); + split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); if (!split) return NULL; @@ -1667,9 +1672,9 @@ EXPORT_SYMBOL(bioset_exit); * Note that the bio must be embedded at the END of that structure always, * or things will break badly. * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated - * for allocating iovecs. This pool is not needed e.g. for bio_clone_fast(). - * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to - * dispatch queued requests when the mempool runs out of space. + * for allocating iovecs. This pool is not needed e.g. for bio_init_clone(). + * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used + * to dispatch queued requests when the mempool runs out of space. * */ int bioset_init(struct bio_set *bs, diff --git a/block/blk-mq.c b/block/blk-mq.c index 1adfe4824ef5..4b868e792ba4 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2975,10 +2975,10 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src, bs = &fs_bio_set; __rq_for_each_bio(bio_src, rq_src) { - bio = bio_clone_fast(bio_src, gfp_mask, bs); + bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask, + bs); if (!bio) goto free_and_out; - bio->bi_bdev = rq->q->disk->part0; if (bio_ctr && bio_ctr(bio, bio_src, data)) goto free_and_out; diff --git a/block/bounce.c b/block/bounce.c index 330ddde25b46..3fd3bc6fd5db 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -162,8 +162,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src) * that does not own the bio - reason being drivers don't use it for * iterating over the biovec anymore, so expecting it to be kept up * to date (i.e. for clones that share the parent biovec) is just - * asking for trouble and would force extra work on - * __bio_clone_fast() anyways. + * asking for trouble and would force extra work. 
*/ bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src), bio_src->bi_opf, GFP_NOIO, &bounce_bio_set); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 8d44e96c4c4e..c00ae8619519 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -30,8 +30,8 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio return NULL; memset(req, 0, sizeof(*req)); - req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set); - bio_set_dev(req->private_bio, device->ldev->backing_bdev); + req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src, + GFP_NOIO, &drbd_io_bio_set); req->private_bio->bi_private = req; req->private_bio->bi_end_io = drbd_request_endio; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 64563bfdf0da..a5e04b38006b 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1523,9 +1523,9 @@ int w_restart_disk_io(struct drbd_work *w, int cancel) if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG) drbd_al_begin_io(device, &req->i); - req->private_bio = bio_clone_fast(req->master_bio, GFP_NOIO, + req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, + req->master_bio, GFP_NOIO, &drbd_io_bio_set); - bio_set_dev(req->private_bio, device->ldev->backing_bdev); req->private_bio->bi_private = req; req->private_bio->bi_end_io = drbd_request_endio; submit_bio_noacct(req->private_bio); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 3aa595442946..be749c686feb 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2294,12 +2294,12 @@ static void pkt_end_io_read_cloned(struct bio *bio) static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) { - struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set); + struct bio *cloned_bio = + bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set); struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO); psd->pd = pd; psd->bio = bio; - bio_set_dev(cloned_bio, pd->bdev); cloned_bio->bi_private = psd; cloned_bio->bi_end_io = pkt_end_io_read_cloned; pd->stats.secs_r += bio_sectors(bio); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index d2cb853bf917..6869e010475a 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s, { struct bio *bio = &s->bio.bio; - __bio_clone_fast(bio, orig_bio, GFP_NOIO); + bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO); /* * bi_end_io can be set separately somewhere else, e.g. 
the * variants in, @@ -1036,7 +1036,8 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) closure_bio_submit(s->iop.c, flush, cl); } } else { - s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split); + s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, + &dc->disk.bio_split); /* I/O request sent to backing device */ bio->bi_end_io = backing_request_endio; closure_bio_submit(s->iop.c, bio, cl); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1c37fe028e53..89fdfb49d564 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -819,13 +819,13 @@ static void issue_op(struct bio *bio, void *context) static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) { - struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs); + struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio, + GFP_NOIO, &cache->bs); BUG_ON(!origin_bio); bio_chain(origin_bio, bio); - remap_to_origin(cache, origin_bio); if (bio_data_dir(origin_bio) == WRITE) clear_discard(cache, oblock_to_dblock(cache, oblock)); submit_bio(origin_bio); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f7e4435b7439..a5006cb6ee8a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1834,17 +1834,16 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) struct bio *clone; /* - * We need the original biovec array in order to decrypt - * the whole bio data *afterwards* -- thanks to immutable - * biovecs we don't need to worry about the block layer - * modifying the biovec array; so leverage bio_clone_fast(). + * We need the original biovec array in order to decrypt the whole bio + * data *afterwards* -- thanks to immutable biovecs we don't need to + * worry about the block layer modifying the biovec array; so leverage + * bio_alloc_clone(). 
*/ - clone = bio_clone_fast(io->base_bio, gfp, &cc->bs); + clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs); if (!clone) return 1; clone->bi_private = io; clone->bi_end_io = crypt_endio; - bio_set_dev(clone, cc->dev->bdev); crypt_inc_pending(io); diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 166c4e9d99c9..a3f6d3ef3817 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -125,11 +125,10 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, if (dev->flags & DMZ_BDEV_DYING) return -EIO; - clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set); + clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set); if (!clone) return -ENOMEM; - bio_set_dev(clone, dev->bdev); bioctx->dev = dev; clone->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 862564a5df74..ab9cc91931f9 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -520,7 +520,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) struct dm_target_io *tio; struct bio *clone; - clone = bio_clone_fast(bio, GFP_NOIO, &md->io_bs); + clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs); tio = clone_to_tio(clone); tio->inside_dm_io = true; @@ -553,8 +553,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, /* the dm_target_io embedded in ci->io is available */ tio = &ci->io->tio; } else { - struct bio *clone = bio_clone_fast(ci->bio, gfp_mask, - &ci->io->md->bs); + struct bio *clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio, + gfp_mask, &ci->io->md->bs); if (!clone) return NULL; diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c index c0dc6f2ef4a3..50ad818978a4 100644 --- a/drivers/md/md-faulty.c +++ b/drivers/md/md-faulty.c @@ -205,9 +205,9 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio) } } if (failit) { - struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO, + &mddev->bio_set); - bio_set_dev(b, conf->rdev->bdev); b->bi_private = bio; b->bi_end_io = faulty_fail; bio = b; diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index 483a5500f83c..97fb948e3e74 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -121,10 +121,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio) } multipath = conf->multipaths + mp_bh->path; - __bio_clone_fast(&mp_bh->bio, bio, GFP_NOIO); + bio_init_clone(multipath->rdev->bdev, &mp_bh->bio, bio, GFP_NOIO); mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; - bio_set_dev(&mp_bh->bio, multipath->rdev->bdev); mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT; mp_bh->bio.bi_end_io = multipath_end_request; mp_bh->bio.bi_private = mp_bh; diff --git a/drivers/md/md.c b/drivers/md/md.c index 0a89f072dae0..f88a9e948f3e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8634,13 +8634,14 @@ static void md_end_io_acct(struct bio *bio) */ void md_account_bio(struct mddev *mddev, struct bio **bio) { + struct block_device *bdev = (*bio)->bi_bdev; struct md_io_acct *md_io_acct; struct bio *clone; - if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue)) + if (!blk_queue_io_stat(bdev->bd_disk->queue)) return; - clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set); + clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set); md_io_acct = container_of(clone, struct md_io_acct, bio_clone); 
md_io_acct->orig_bio = *bio; md_io_acct->start_time = bio_start_io_acct(*bio); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e7710fb5befb..c3288d46948d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1320,13 +1320,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) r1_bio->start_time = bio_start_io_acct(bio); - read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); + read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp, + &mddev->bio_set); r1_bio->bios[rdisk] = read_bio; read_bio->bi_iter.bi_sector = r1_bio->sector + mirror->rdev->data_offset; - bio_set_dev(read_bio, mirror->rdev->bdev); read_bio->bi_end_io = raid1_end_read_request; bio_set_op_attrs(read_bio, op, do_sync); if (test_bit(FailFast, &mirror->rdev->flags) && @@ -1546,24 +1546,25 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, first_clone = 0; } - if (r1_bio->behind_master_bio) - mbio = bio_clone_fast(r1_bio->behind_master_bio, - GFP_NOIO, &mddev->bio_set); - else - mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); - if (r1_bio->behind_master_bio) { + mbio = bio_alloc_clone(rdev->bdev, + r1_bio->behind_master_bio, + GFP_NOIO, &mddev->bio_set); if (test_bit(CollisionCheck, &rdev->flags)) wait_for_serialization(rdev, r1_bio); if (test_bit(WriteMostly, &rdev->flags)) atomic_inc(&r1_bio->behind_remaining); - } else if (mddev->serialize_policy) - wait_for_serialization(rdev, r1_bio); + } else { + mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, + &mddev->bio_set); + + if (mddev->serialize_policy) + wait_for_serialization(rdev, r1_bio); + } r1_bio->bios[i] = mbio; mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset); - bio_set_dev(mbio, rdev->bdev); mbio->bi_end_io = raid1_end_write_request; mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); if (test_bit(FailFast, &rdev->flags) && @@ -2416,12 +2417,12 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) /* Write at 'sector' for 'sectors'*/ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { - wbio = bio_clone_fast(r1_bio->behind_master_bio, - GFP_NOIO, - &mddev->bio_set); + wbio = bio_alloc_clone(rdev->bdev, + r1_bio->behind_master_bio, + GFP_NOIO, &mddev->bio_set); } else { - wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, - &mddev->bio_set); + wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio, + GFP_NOIO, &mddev->bio_set); } bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); @@ -2430,7 +2431,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) bio_trim(wbio, sector - r1_bio->sector, sectors); wbio->bi_iter.bi_sector += rdev->data_offset; - bio_set_dev(wbio, rdev->bdev); if (submit_bio_wait(wbio) < 0) /* failure! 
*/ diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index da07bcbc06d0..5dd2e17e1d0e 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1208,14 +1208,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue)) r10_bio->start_time = bio_start_io_acct(bio); - read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); + read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set); r10_bio->devs[slot].bio = read_bio; r10_bio->devs[slot].rdev = rdev; read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + choose_data_offset(r10_bio, rdev); - bio_set_dev(read_bio, rdev->bdev); read_bio->bi_end_io = raid10_end_read_request; bio_set_op_attrs(read_bio, op, do_sync); if (test_bit(FailFast, &rdev->flags) && @@ -1255,7 +1254,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, } else rdev = conf->mirrors[devnum].rdev; - mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set); if (replacement) r10_bio->devs[n_copy].repl_bio = mbio; else @@ -1263,7 +1262,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + choose_data_offset(r10_bio, rdev)); - bio_set_dev(mbio, rdev->bdev); mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); if (!replacement && test_bit(FailFast, @@ -1812,7 +1810,8 @@ retry_discard: */ if (r10_bio->devs[disk].bio) { struct md_rdev *rdev = conf->mirrors[disk].rdev; - mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, + &mddev->bio_set); mbio->bi_end_io = raid10_end_discard_request; mbio->bi_private = r10_bio; r10_bio->devs[disk].bio = mbio; @@ -1825,7 +1824,8 @@ retry_discard: } if (r10_bio->devs[disk].repl_bio) { struct md_rdev *rrdev = conf->mirrors[disk].replacement; - rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, + &mddev->bio_set); rbio->bi_end_io = raid10_end_discard_request; rbio->bi_private = r10_bio; r10_bio->devs[disk].repl_bio = rbio; @@ -2892,12 +2892,12 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) if (sectors > sect_to_write) sectors = sect_to_write; /* Write at 'sector' for 'sectors' */ - wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); + wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, + &mddev->bio_set); bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); wbio->bi_iter.bi_sector = wsector + choose_data_offset(r10_bio, rdev); - bio_set_dev(wbio, rdev->bdev); bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); if (submit_bio_wait(wbio) < 0) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7c119208a214..8891aaba6596 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5438,14 +5438,14 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) return 0; } - align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set); + align_bio = bio_alloc_clone(rdev->bdev, raid_bio, GFP_NOIO, + &mddev->io_acct_set); md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone); raid_bio->bi_next = (void *)rdev; if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue)) md_io_acct->start_time = bio_start_io_acct(raid_bio); md_io_acct->orig_bio = raid_bio; - bio_set_dev(align_bio, rdev->bdev); align_bio->bi_end_io = raid5_align_endio; 
align_bio->bi_private = md_io_acct; align_bio->bi_iter.bi_sector = sector; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 421d921a0571..dee86911a4be 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3154,7 +3154,7 @@ struct bio *btrfs_bio_clone(struct bio *bio) struct bio *new; /* Bio allocation backed by a bioset does not fail */ - new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset); + new = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOFS, &btrfs_bioset); bbio = btrfs_bio(new); btrfs_bio_init(bbio); bbio->iter = bio->bi_iter; @@ -3169,7 +3169,7 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size) ASSERT(offset <= UINT_MAX && size <= UINT_MAX); /* this will never fail when it's backed by a bioset */ - bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset); + bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset); ASSERT(bio); bbio = btrfs_bio(bio); diff --git a/include/linux/bio.h b/include/linux/bio.h index b814361c957b..7523aba4ddf7 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -413,8 +413,10 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev, struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); extern void bio_put(struct bio *); -int __bio_clone_fast(struct bio *bio, struct bio *bio_src, gfp_t gfp); -extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); +struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, + gfp_t gfp, struct bio_set *bs); +int bio_init_clone(struct block_device *bdev, struct bio *bio, + struct bio *bio_src, gfp_t gfp); extern struct bio_set fs_bio_set; -- cgit v1.2.3-71-gd317 From 916acbf6b4b9262df7de1d2b6208a4efa209a88f Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 3 Feb 2022 16:45:21 +0200 Subject: serial: core: Fix the definition name in the comment of UPF_* flags From day 1, UPF_LAST_USER, a specific number for the last userspace-visible bit, was never defined. Instead the code always relies on ASYNCB_LAST_USER. Fix the comment accordingly. Fixes: 904326ecac02 ("tty,serial: Unify UPF_* and ASYNC_* flag definitions") Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220203144521.16457-1-andriy.shevchenko@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- include/linux/serial_core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index c58cc142d23f..31f7fe527395 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -171,7 +171,7 @@ struct uart_port { * assigned from the serial_struct flags in uart_set_info() * [for bit definitions in the UPF_CHANGE_MASK] * - * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable + * Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable * The remaining bits are serial-core specific and not modifiable by * userspace. */ -- cgit v1.2.3-71-gd317 From 84564481bc4520c47e7fe9c594c0523d81e6a97a Mon Sep 17 00:00:00 2001 From: Aswath Govindraju Date: Fri, 7 Jan 2022 08:44:25 +0100 Subject: mux: Add support for reading mux state from consumer DT node In some cases, we might need to provide the state of the mux to be set for the operation of a given peripheral. Therefore, pass this information using the mux-states property.
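A hedged consumer-side sketch of the resulting API; devm_mux_state_get() is the devres getter added by this patch (listed in the devres table below), its (dev, name) signature is assumed here, and the driver and mux name are invented:

#include <linux/err.h>
#include <linux/mux/consumer.h>

static int example_peripheral_op(struct device *dev)
{
	struct mux_state *mstate;
	int ret;

	/* The state to select comes from the consumer's mux-states DT
	 * property, so the driver never hard-codes it. */
	mstate = devm_mux_state_get(dev, "example-mux");
	if (IS_ERR(mstate))
		return PTR_ERR(mstate);

	/* Lock the underlying mux-control and switch to that state. */
	ret = mux_state_select_delay(mstate, 0);
	if (ret)
		return ret;

	/* ... perform the operation that needs the mux in this state ... */

	return mux_state_deselect(mstate);
}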
Link: https://lore.kernel.org/lkml/20211123081222.27979-1-a-govindraju@ti.com Signed-off-by: Aswath Govindraju Signed-off-by: Peter Rosin (minor edits) Link: https://lore.kernel.org/r/aac25be8-9515-a980-f7cb-709938c84822@axentia.se Signed-off-by: Greg Kroah-Hartman --- Documentation/driver-api/driver-model/devres.rst | 1 + drivers/mux/core.c | 219 ++++++++++++++++++++--- include/linux/mux/consumer.h | 18 ++ 3 files changed, 217 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index 148e19381b79..5018403fe82f 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -368,6 +368,7 @@ MUX devm_mux_chip_alloc() devm_mux_chip_register() devm_mux_control_get() + devm_mux_state_get() NET devm_alloc_etherdev() diff --git a/drivers/mux/core.c b/drivers/mux/core.c index 22f4709768d1..931fa8bd4962 100644 --- a/drivers/mux/core.c +++ b/drivers/mux/core.c @@ -29,6 +29,20 @@ */ #define MUX_CACHE_UNKNOWN MUX_IDLE_AS_IS +/** + * struct mux_state - Represents a mux controller state specific to a given + * consumer. + * @mux: Pointer to a mux controller. + * @state State of the mux to be selected. + * + * This structure is specific to the consumer that acquires it and has + * information specific to that consumer. + */ +struct mux_state { + struct mux_control *mux; + unsigned int state; +}; + static struct class mux_class = { .name = "mux", .owner = THIS_MODULE, @@ -341,7 +355,8 @@ static void mux_control_delay(struct mux_control *mux, unsigned int delay_us) * On successfully selecting the mux-control state, it will be locked until * there is a call to mux_control_deselect(). If the mux-control is already * selected when mux_control_select() is called, the caller will be blocked - * until mux_control_deselect() is called (by someone else). + * until mux_control_deselect() or mux_state_deselect() is called (by someone + * else). * * Therefore, make sure to call mux_control_deselect() when the operation is * complete and the mux-control is free for others to use, but do not call @@ -370,6 +385,30 @@ int mux_control_select_delay(struct mux_control *mux, unsigned int state, } EXPORT_SYMBOL_GPL(mux_control_select_delay); +/** + * mux_state_select_delay() - Select the given multiplexer state. + * @mstate: The mux-state to select. + * @delay_us: The time to delay (in microseconds) if the mux state is changed. + * + * On successfully selecting the mux-state, its mux-control will be locked + * until there is a call to mux_state_deselect(). If the mux-control is already + * selected when mux_state_select() is called, the caller will be blocked + * until mux_state_deselect() or mux_control_deselect() is called (by someone + * else). + * + * Therefore, make sure to call mux_state_deselect() when the operation is + * complete and the mux-control is free for others to use, but do not call + * mux_state_deselect() if mux_state_select() fails. + * + * Return: 0 when the mux-state has been selected or a negative + * errno on error. + */ +int mux_state_select_delay(struct mux_state *mstate, unsigned int delay_us) +{ + return mux_control_select_delay(mstate->mux, mstate->state, delay_us); +} +EXPORT_SYMBOL_GPL(mux_state_select_delay); + /** * mux_control_try_select_delay() - Try to select the given multiplexer state. * @mux: The mux-control to request a change of state from. 
@@ -405,6 +444,27 @@ int mux_control_try_select_delay(struct mux_control *mux, unsigned int state, } EXPORT_SYMBOL_GPL(mux_control_try_select_delay); +/** + * mux_state_try_select_delay() - Try to select the given multiplexer state. + * @mstate: The mux-state to select. + * @delay_us: The time to delay (in microseconds) if the mux state is changed. + * + * On successfully selecting the mux-state, its mux-control will be locked + * until mux_state_deselect() is called. + * + * Therefore, make sure to call mux_state_deselect() when the operation is + * complete and the mux-control is free for others to use, but do not call + * mux_state_deselect() if mux_state_try_select() fails. + * + * Return: 0 when the mux-state has been selected or a negative errno on + * error. Specifically -EBUSY if the mux-control is contended. + */ +int mux_state_try_select_delay(struct mux_state *mstate, unsigned int delay_us) +{ + return mux_control_try_select_delay(mstate->mux, mstate->state, delay_us); +} +EXPORT_SYMBOL_GPL(mux_state_try_select_delay); + /** * mux_control_deselect() - Deselect the previously selected multiplexer state. * @mux: The mux-control to deselect. @@ -431,6 +491,24 @@ int mux_control_deselect(struct mux_control *mux) } EXPORT_SYMBOL_GPL(mux_control_deselect); +/** + * mux_state_deselect() - Deselect the previously selected multiplexer state. + * @mstate: The mux-state to deselect. + * + * It is required that a single call is made to mux_state_deselect() for + * each and every successful call made to either of mux_state_select() or + * mux_state_try_select(). + * + * Return: 0 on success and a negative errno on error. An error can only + * occur if the mux has an idle state. Note that even if an error occurs, the + * mux-control is unlocked and is thus free for the next access. + */ +int mux_state_deselect(struct mux_state *mstate) +{ + return mux_control_deselect(mstate->mux); +} +EXPORT_SYMBOL_GPL(mux_state_deselect); + /* Note this function returns a reference to the mux_chip dev. */ static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) { @@ -441,14 +519,17 @@ static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) return dev ? to_mux_chip(dev) : NULL; } -/** - * mux_control_get() - Get the mux-control for a device. +/* + * mux_get() - Get the mux-control for a device. * @dev: The device that needs a mux-control. * @mux_name: The name identifying the mux-control. + * @state: Pointer to where the requested state is returned, or NULL when + * the required multiplexer states are handled by other means. * * Return: A pointer to the mux-control, or an ERR_PTR with a negative errno. 
*/ -struct mux_control *mux_control_get(struct device *dev, const char *mux_name) +static struct mux_control *mux_get(struct device *dev, const char *mux_name, + unsigned int *state) { struct device_node *np = dev->of_node; struct of_phandle_args args; @@ -458,8 +539,12 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) int ret; if (mux_name) { - index = of_property_match_string(np, "mux-control-names", - mux_name); + if (state) + index = of_property_match_string(np, "mux-state-names", + mux_name); + else + index = of_property_match_string(np, "mux-control-names", + mux_name); if (index < 0) { dev_err(dev, "mux controller '%s' not found\n", mux_name); @@ -467,12 +552,17 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) } } - ret = of_parse_phandle_with_args(np, - "mux-controls", "#mux-control-cells", - index, &args); + if (state) + ret = of_parse_phandle_with_args(np, + "mux-states", "#mux-state-cells", + index, &args); + else + ret = of_parse_phandle_with_args(np, + "mux-controls", "#mux-control-cells", + index, &args); if (ret) { - dev_err(dev, "%pOF: failed to get mux-control %s(%i)\n", - np, mux_name ?: "", index); + dev_err(dev, "%pOF: failed to get mux-%s %s(%i)\n", + np, state ? "state" : "control", mux_name ?: "", index); return ERR_PTR(ret); } @@ -481,17 +571,35 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) if (!mux_chip) return ERR_PTR(-EPROBE_DEFER); - if (args.args_count > 1 || - (!args.args_count && (mux_chip->controllers > 1))) { - dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", - np, args.np); - put_device(&mux_chip->dev); - return ERR_PTR(-EINVAL); - } - controller = 0; - if (args.args_count) - controller = args.args[0]; + if (state) { + if (args.args_count > 2 || args.args_count == 0 || + (args.args_count < 2 && mux_chip->controllers > 1)) { + dev_err(dev, "%pOF: wrong #mux-state-cells for %pOF\n", + np, args.np); + put_device(&mux_chip->dev); + return ERR_PTR(-EINVAL); + } + + if (args.args_count == 2) { + controller = args.args[0]; + *state = args.args[1]; + } else { + *state = args.args[0]; + } + + } else { + if (args.args_count > 1 || + (!args.args_count && mux_chip->controllers > 1)) { + dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", + np, args.np); + put_device(&mux_chip->dev); + return ERR_PTR(-EINVAL); + } + + if (args.args_count) + controller = args.args[0]; + } if (controller >= mux_chip->controllers) { dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n", @@ -502,6 +610,18 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) return &mux_chip->mux[controller]; } + +/** + * mux_control_get() - Get the mux-control for a device. + * @dev: The device that needs a mux-control. + * @mux_name: The name identifying the mux-control. + * + * Return: A pointer to the mux-control, or an ERR_PTR with a negative errno. + */ +struct mux_control *mux_control_get(struct device *dev, const char *mux_name) +{ + return mux_get(dev, mux_name, NULL); +} EXPORT_SYMBOL_GPL(mux_control_get); /** @@ -553,6 +673,63 @@ struct mux_control *devm_mux_control_get(struct device *dev, } EXPORT_SYMBOL_GPL(devm_mux_control_get); +/* + * mux_state_put() - Put away the mux-state for good. + * @mstate: The mux-state to put away. + * + * mux_state_put() reverses the effects of mux_state_get(). 
+ */ +static void mux_state_put(struct mux_state *mstate) +{ + mux_control_put(mstate->mux); + kfree(mstate); +} + +static void devm_mux_state_release(struct device *dev, void *res) +{ + struct mux_state *mstate = *(struct mux_state **)res; + + mux_state_put(mstate); +} + +/** + * devm_mux_state_get() - Get the mux-state for a device, with resource + * management. + * @dev: The device that needs a mux-control. + * @mux_name: The name identifying the mux-control. + * + * Return: Pointer to the mux-state, or an ERR_PTR with a negative errno. + */ +struct mux_state *devm_mux_state_get(struct device *dev, + const char *mux_name) +{ + struct mux_state **ptr, *mstate; + struct mux_control *mux_ctrl; + int state; + + mstate = devm_kzalloc(dev, sizeof(struct mux_state), GFP_KERNEL); + if (!mstate) + return ERR_PTR(-ENOMEM); + + ptr = devres_alloc(devm_mux_state_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + mux_ctrl = mux_get(dev, mux_name, &state); + if (IS_ERR(mux_ctrl)) { + devres_free(ptr); + return (struct mux_state *)mux_ctrl; + } + + mstate->mux = mux_ctrl; + mstate->state = state; + *ptr = mstate; + devres_add(dev, ptr); + + return mstate; +} +EXPORT_SYMBOL_GPL(devm_mux_state_get); + /* * Using subsys_initcall instead of module_init here to try to ensure - for * the non-modular case - that the subsystem is initialized when mux consumers diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h index 7a09b040ac39..2e25c838f831 100644 --- a/include/linux/mux/consumer.h +++ b/include/linux/mux/consumer.h @@ -14,14 +14,19 @@ struct device; struct mux_control; +struct mux_state; unsigned int mux_control_states(struct mux_control *mux); int __must_check mux_control_select_delay(struct mux_control *mux, unsigned int state, unsigned int delay_us); +int __must_check mux_state_select_delay(struct mux_state *mstate, + unsigned int delay_us); int __must_check mux_control_try_select_delay(struct mux_control *mux, unsigned int state, unsigned int delay_us); +int __must_check mux_state_try_select_delay(struct mux_state *mstate, + unsigned int delay_us); static inline int __must_check mux_control_select(struct mux_control *mux, unsigned int state) @@ -29,18 +34,31 @@ static inline int __must_check mux_control_select(struct mux_control *mux, return mux_control_select_delay(mux, state, 0); } +static inline int __must_check mux_state_select(struct mux_state *mstate) +{ + return mux_state_select_delay(mstate, 0); +} + static inline int __must_check mux_control_try_select(struct mux_control *mux, unsigned int state) { return mux_control_try_select_delay(mux, state, 0); } +static inline int __must_check mux_state_try_select(struct mux_state *mstate) +{ + return mux_state_try_select_delay(mstate, 0); +} + int mux_control_deselect(struct mux_control *mux); +int mux_state_deselect(struct mux_state *mstate); struct mux_control *mux_control_get(struct device *dev, const char *mux_name); void mux_control_put(struct mux_control *mux); struct mux_control *devm_mux_control_get(struct device *dev, const char *mux_name); +struct mux_state *devm_mux_state_get(struct device *dev, + const char *mux_name); #endif /* _LINUX_MUX_CONSUMER_H */ -- cgit v1.2.3-71-gd317 From bb6e8c28414335a551da5973d44cc537f7abe65a Mon Sep 17 00:00:00 2001 From: Luis Chamberlain Date: Wed, 12 Jan 2022 08:00:53 -0800 Subject: firmware_loader: simplfy builtin or module check The existing check is outdated and confuses developers. 
Use the already existing IS_REACHABLE() defined on kconfig.h which makes the intention much clearer. Cc: Randy Dunlap Cc: Masahiro Yamada Reported-by: Borislav Petkov Reported-by: Greg Kroah-Hartman Suggested-by: Masahiro Yamada Signed-off-by: Luis Chamberlain Ackd-by: Randy Dunlap Link: https://lore.kernel.org/r/20220112160053.723795-1-mcgrof@kernel.org Signed-off-by: Greg Kroah-Hartman --- include/linux/firmware.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 3b057dfc8284..ec2ccfebef65 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -34,7 +34,7 @@ static inline bool firmware_request_builtin(struct firmware *fw, } #endif -#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) +#if IS_REACHABLE(CONFIG_FW_LOADER) int request_firmware(const struct firmware **fw, const char *name, struct device *device); int firmware_request_nowarn(const struct firmware **fw, const char *name, -- cgit v1.2.3-71-gd317 From bed89478934a9803d1a4d97574dcc30e509aa972 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 2 Feb 2022 10:49:38 +0200 Subject: ieee80211: fix -Wcast-qual warnings When enabling -Wcast-qual e.g. via W=3, we get a lot of warnings from this file, whenever it's included. Since the fixes are simple, just do that. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20220202104617.79ec4a4bab29.I8177a0c79d656c552e22c88931d8da06f2977896@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 559b6c644938..60ee7b3f58e7 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2427,7 +2427,7 @@ struct ieee80211_tx_pwr_env { static inline u8 ieee80211_he_oper_size(const u8 *he_oper_ie) { - struct ieee80211_he_operation *he_oper = (void *)he_oper_ie; + const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie; u8 oper_len = sizeof(struct ieee80211_he_operation); u32 he_oper_params; @@ -2460,7 +2460,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) static inline const struct ieee80211_he_6ghz_oper * ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) { - const u8 *ret = (void *)&he_oper->optional; + const u8 *ret = (const void *)&he_oper->optional; u32 he_oper_params; if (!he_oper) @@ -2475,7 +2475,7 @@ ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) ret++; - return (void *)ret; + return (const void *)ret; } /* HE Spatial Reuse defines */ @@ -2496,7 +2496,7 @@ ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) static inline u8 ieee80211_he_spr_size(const u8 *he_spr_ie) { - struct ieee80211_he_spr *he_spr = (void *)he_spr_ie; + const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie; u8 spr_len = sizeof(struct ieee80211_he_spr); u8 he_spr_params; -- cgit v1.2.3-71-gd317 From b9794a822281944ef3de5b1812a94cbdb8134320 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Fri, 28 Jan 2022 17:35:33 +0100 Subject: powercap/drivers/dtpm: Convert the init table section to a simple array The init table section is freed after the system booted. However the next changes will make per module the DTPM description, so the table won't be accessible when the module is loaded. 
In order to fix that, we would have to move the table to the data section, where there are very few entries and where such a table would look out of place. The main goal of the table was to keep the code self-encapsulated, and we can keep it almost as it is by using an array instead. Suggested-by: Ulf Hansson Reviewed-by: Ulf Hansson Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20220128163537.212248-2-daniel.lezcano@linaro.org --- drivers/powercap/dtpm.c | 2 ++ drivers/powercap/dtpm_cpu.c | 5 ++++- drivers/powercap/dtpm_subsys.h | 18 ++++++++++++++++++ include/asm-generic/vmlinux.lds.h | 11 ----------- include/linux/dtpm.h | 24 +++--------------------- 5 files changed, 27 insertions(+), 33 deletions(-) create mode 100644 drivers/powercap/dtpm_subsys.h (limited to 'include/linux') diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c index 8cb45f2d3d78..0e5c93443c70 100644 --- a/drivers/powercap/dtpm.c +++ b/drivers/powercap/dtpm.c @@ -24,6 +24,8 @@ #include #include +#include "dtpm_subsys.h" + #define DTPM_POWER_LIMIT_FLAG 0 static const char *constraint_name[] = { diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c index b740866b228d..5763e0ce2af5 100644 --- a/drivers/powercap/dtpm_cpu.c +++ b/drivers/powercap/dtpm_cpu.c @@ -269,4 +269,7 @@ static int __init dtpm_cpu_init(void) return 0; } -DTPM_DECLARE(dtpm_cpu, dtpm_cpu_init); +struct dtpm_subsys_ops dtpm_cpu_ops = { + .name = KBUILD_MODNAME, + .init = dtpm_cpu_init, +}; diff --git a/drivers/powercap/dtpm_subsys.h b/drivers/powercap/dtpm_subsys.h new file mode 100644 index 000000000000..2a3a2055f60e --- /dev/null +++ b/drivers/powercap/dtpm_subsys.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 Linaro Ltd + * + * Author: Daniel Lezcano + */ +#ifndef ___DTPM_SUBSYS_H__ +#define ___DTPM_SUBSYS_H__ + +extern struct dtpm_subsys_ops dtpm_cpu_ops; + +struct dtpm_subsys_ops *dtpm_subsys[] = { +#ifdef CONFIG_DTPM_CPU + &dtpm_cpu_ops, +#endif +}; + +#endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 42f3866bca69..2a10db2f0bc5 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -321,16 +321,6 @@ #define THERMAL_TABLE(name) #endif -#ifdef CONFIG_DTPM -#define DTPM_TABLE() \ - . 
= ALIGN(8); \ - __dtpm_table = .; \ - KEEP(*(__dtpm_table)) \ - __dtpm_table_end = .; -#else -#define DTPM_TABLE() -#endif - #define KERNEL_DTB() \ STRUCT_ALIGN(); \ __dtb_start = .; \ @@ -723,7 +713,6 @@ ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(timer) \ THERMAL_TABLE(governor) \ - DTPM_TABLE() \ EARLYCON_TABLE() \ LSM_TABLE() \ EARLY_LSM_TABLE() \ diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h index d37e5d06a357..506048158a50 100644 --- a/include/linux/dtpm.h +++ b/include/linux/dtpm.h @@ -32,29 +32,11 @@ struct dtpm_ops { void (*release)(struct dtpm *); }; -typedef int (*dtpm_init_t)(void); - -struct dtpm_descr { - dtpm_init_t init; +struct dtpm_subsys_ops { + const char *name; + int (*init)(void); }; -/* Init section thermal table */ -extern struct dtpm_descr __dtpm_table[]; -extern struct dtpm_descr __dtpm_table_end[]; - -#define DTPM_TABLE_ENTRY(name, __init) \ - static struct dtpm_descr __dtpm_table_entry_##name \ - __used __section("__dtpm_table") = { \ - .init = __init, \ - } - -#define DTPM_DECLARE(name, init) DTPM_TABLE_ENTRY(name, init) - -#define for_each_dtpm_table(__dtpm) \ - for (__dtpm = __dtpm_table; \ - __dtpm < __dtpm_table_end; \ - __dtpm++) - static inline struct dtpm *to_dtpm(struct powercap_zone *zone) { return container_of(zone, struct dtpm, zone); -- cgit v1.2.3-71-gd317 From 3759ec678e8944dc2ea70cab77a300408f78ae27 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Fri, 28 Jan 2022 17:35:34 +0100 Subject: powercap/drivers/dtpm: Add hierarchy creation The DTPM framework is available but without a way to configure it. This change provides a way to create a hierarchy of DTPM nodes where the power consumption reflects the sum of the children's power consumption. It is up to the platform to specify an array of dtpm nodes where each element has a pointer to its parent, except the topmost one. The type of the node indicates which initialization callback to call. At this time, we can create a virtual node, whose purpose is to be a parent in the hierarchy, and a DT node whose name describes its path. In order to ensure a nice self-encapsulation, the DTPM subsys array contains a couple of initialization functions, one to set up the DTPM backend and one to initialize it. With this approach, the DTPM framework has very little material to export. Signed-off-by: Daniel Lezcano Reviewed-by: Ulf Hansson Link: https://lore.kernel.org/r/20220128163537.212248-3-daniel.lezcano@linaro.org --- drivers/powercap/Kconfig | 1 + drivers/powercap/dtpm.c | 190 ++++++++++++++++++++++++++++++++++++++++++++++- include/linux/dtpm.h | 15 ++++ 3 files changed, 203 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig index 8242e8c5ed77..b1ca339957e3 100644 --- a/drivers/powercap/Kconfig +++ b/drivers/powercap/Kconfig @@ -46,6 +46,7 @@ config IDLE_INJECT config DTPM bool "Power capping for Dynamic Thermal Power Management (EXPERIMENTAL)" + depends on OF help This enables support for the power capping for the dynamic thermal power management userspace engine. 
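(A small platform-side sketch of how the hierarchy API added by this patch might be wired up; the board compatible, node names and DT paths are made up for illustration and are not part of the patch.)

#include <linux/dtpm.h>
#include <linux/init.h>
#include <linux/of.h>

static struct dtpm_node example_hierarchy[] = {
	{ .name = "pkg",        .type = DTPM_NODE_VIRTUAL },
	{ .name = "/cpus/cpu0", .type = DTPM_NODE_DT, .parent = &example_hierarchy[0] },
	{ .name = "/cpus/cpu1", .type = DTPM_NODE_DT, .parent = &example_hierarchy[0] },
	{ },	/* empty entry marks the end of the array */
};

static struct of_device_id example_dtpm_match_table[] = {
	{ .compatible = "vendor,example-board", .data = example_hierarchy },
	{},
};

static int __init example_dtpm_init(void)
{
	/* Matched against the root DT node; builds the powercap/dtpm tree. */
	return dtpm_create_hierarchy(example_dtpm_match_table);
}
late_initcall(example_dtpm_init);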
diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c index 0e5c93443c70..414826a1509b 100644 --- a/drivers/powercap/dtpm.c +++ b/drivers/powercap/dtpm.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "dtpm_subsys.h" @@ -463,14 +464,197 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent) return 0; } -static int __init init_dtpm(void) +static struct dtpm *dtpm_setup_virtual(const struct dtpm_node *hierarchy, + struct dtpm *parent) { + struct dtpm *dtpm; + int ret; + + dtpm = kzalloc(sizeof(*dtpm), GFP_KERNEL); + if (!dtpm) + return ERR_PTR(-ENOMEM); + dtpm_init(dtpm, NULL); + + ret = dtpm_register(hierarchy->name, dtpm, parent); + if (ret) { + pr_err("Failed to register dtpm node '%s': %d\n", + hierarchy->name, ret); + kfree(dtpm); + return ERR_PTR(ret); + } + + return dtpm; +} + +static struct dtpm *dtpm_setup_dt(const struct dtpm_node *hierarchy, + struct dtpm *parent) +{ + struct device_node *np; + int i, ret; + + np = of_find_node_by_path(hierarchy->name); + if (!np) { + pr_err("Failed to find '%s'\n", hierarchy->name); + return ERR_PTR(-ENXIO); + } + + for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) { + + if (!dtpm_subsys[i]->setup) + continue; + + ret = dtpm_subsys[i]->setup(parent, np); + if (ret) { + pr_err("Failed to setup '%s': %d\n", dtpm_subsys[i]->name, ret); + of_node_put(np); + return ERR_PTR(ret); + } + } + + of_node_put(np); + + /* + * By returning a NULL pointer, we let know the caller there + * is no child for us as we are a leaf of the tree + */ + return NULL; +} + +typedef struct dtpm * (*dtpm_node_callback_t)(const struct dtpm_node *, struct dtpm *); + +dtpm_node_callback_t dtpm_node_callback[] = { + [DTPM_NODE_VIRTUAL] = dtpm_setup_virtual, + [DTPM_NODE_DT] = dtpm_setup_dt, +}; + +static int dtpm_for_each_child(const struct dtpm_node *hierarchy, + const struct dtpm_node *it, struct dtpm *parent) +{ + struct dtpm *dtpm; + int i, ret; + + for (i = 0; hierarchy[i].name; i++) { + + if (hierarchy[i].parent != it) + continue; + + dtpm = dtpm_node_callback[hierarchy[i].type](&hierarchy[i], parent); + + /* + * A NULL pointer means there is no children, hence we + * continue without going deeper in the recursivity. + */ + if (!dtpm) + continue; + + /* + * There are multiple reasons why the callback could + * fail. The generic glue is abstracting the backend + * and therefore it is not possible to report back or + * take a decision based on the error. In any case, + * if this call fails, it is not critical in the + * hierarchy creation, we can assume the underlying + * service is not found, so we continue without this + * branch in the tree but with a warning to log the + * information the node was not created. + */ + if (IS_ERR(dtpm)) { + pr_warn("Failed to create '%s' in the hierarchy\n", + hierarchy[i].name); + continue; + } + + ret = dtpm_for_each_child(hierarchy, &hierarchy[i], dtpm); + if (ret) + return ret; + } + + return 0; +} + +/** + * dtpm_create_hierarchy - Create the dtpm hierarchy + * @hierarchy: An array of struct dtpm_node describing the hierarchy + * + * The function is called by the platform specific code with the + * description of the different node in the hierarchy. It creates the + * tree in the sysfs filesystem under the powercap dtpm entry. 
+ * + * The expected tree has the format: + * + * struct dtpm_node hierarchy[] = { + * [0] { .name = "topmost", type = DTPM_NODE_VIRTUAL }, + * [1] { .name = "package", .type = DTPM_NODE_VIRTUAL, .parent = &hierarchy[0] }, + * [2] { .name = "/cpus/cpu0", .type = DTPM_NODE_DT, .parent = &hierarchy[1] }, + * [3] { .name = "/cpus/cpu1", .type = DTPM_NODE_DT, .parent = &hierarchy[1] }, + * [4] { .name = "/cpus/cpu2", .type = DTPM_NODE_DT, .parent = &hierarchy[1] }, + * [5] { .name = "/cpus/cpu3", .type = DTPM_NODE_DT, .parent = &hierarchy[1] }, + * [6] { } + * }; + * + * The last element is always an empty one and marks the end of the + * array. + * + * Return: zero on success, a negative value in case of error. Errors + * are reported back from the underlying functions. + */ +int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table) +{ + const struct of_device_id *match; + const struct dtpm_node *hierarchy; + struct device_node *np; + int i, ret; + + if (pct) + return -EBUSY; + pct = powercap_register_control_type(NULL, "dtpm", NULL); if (IS_ERR(pct)) { pr_err("Failed to register control type\n"); - return PTR_ERR(pct); + ret = PTR_ERR(pct); + goto out_pct; + } + + ret = -ENODEV; + np = of_find_node_by_path("/"); + if (!np) + goto out_err; + + match = of_match_node(dtpm_match_table, np); + + of_node_put(np); + + if (!match) + goto out_err; + + hierarchy = match->data; + if (!hierarchy) { + ret = -EFAULT; + goto out_err; + } + + ret = dtpm_for_each_child(hierarchy, NULL, NULL); + if (ret) + goto out_err; + + for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) { + + if (!dtpm_subsys[i]->init) + continue; + + ret = dtpm_subsys[i]->init(); + if (ret) + pr_info("Failed to initialze '%s': %d", + dtpm_subsys[i]->name, ret); } return 0; + +out_err: + powercap_unregister_control_type(pct); +out_pct: + pct = NULL; + + return ret; } -late_initcall(init_dtpm); +EXPORT_SYMBOL_GPL(dtpm_create_hierarchy); diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h index 506048158a50..f7a25c70dd4c 100644 --- a/include/linux/dtpm.h +++ b/include/linux/dtpm.h @@ -32,9 +32,23 @@ struct dtpm_ops { void (*release)(struct dtpm *); }; +struct device_node; + struct dtpm_subsys_ops { const char *name; int (*init)(void); + int (*setup)(struct dtpm *, struct device_node *); +}; + +enum DTPM_NODE_TYPE { + DTPM_NODE_VIRTUAL = 0, + DTPM_NODE_DT, +}; + +struct dtpm_node { + enum DTPM_NODE_TYPE type; + const char *name; + struct dtpm_node *parent; }; static inline struct dtpm *to_dtpm(struct powercap_zone *zone) @@ -52,4 +66,5 @@ void dtpm_unregister(struct dtpm *dtpm); int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent); +int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table); #endif -- cgit v1.2.3-71-gd317 From 80110bbfbba6f0078d5a1cbc8df004506db8ffe5 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 3 Feb 2022 20:49:24 -0800 Subject: mm/page_table_check: check entries at pmd levels syzbot detected a case where the page table counters were not properly updated. syzkaller login: ------------[ cut here ]------------ kernel BUG at mm/page_table_check.c:162! 
invalid opcode: 0000 [#1] PREEMPT SMP KASAN CPU: 0 PID: 3099 Comm: pasha Not tainted 5.16.0+ #48 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIO4 RIP: 0010:__page_table_check_zero+0x159/0x1a0 Call Trace: free_pcp_prepare+0x3be/0xaa0 free_unref_page+0x1c/0x650 free_compound_page+0xec/0x130 free_transhuge_page+0x1be/0x260 __put_compound_page+0x90/0xd0 release_pages+0x54c/0x1060 __pagevec_release+0x7c/0x110 shmem_undo_range+0x85e/0x1250 ... The repro involved having a huge page that is split due to uprobe event temporarily replacing one of the pages in the huge page. Later the huge page was combined again, but the counters were off, as the PTE level was not properly updated. Make sure that when PMD is cleared and prior to freeing the level the PTEs are updated. Link: https://lkml.kernel.org/r/20220131203249.2832273-5-pasha.tatashin@soleen.com Fixes: df4e817b7108 ("mm: page table check") Signed-off-by: Pasha Tatashin Acked-by: David Rientjes Cc: Aneesh Kumar K.V Cc: Anshuman Khandual Cc: Dave Hansen Cc: Greg Thelen Cc: H. Peter Anvin Cc: Hugh Dickins Cc: Ingo Molnar Cc: Jiri Slaby Cc: Mike Rapoport Cc: Muchun Song Cc: Paul Turner Cc: Wei Xu Cc: Will Deacon Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_table_check.h | 19 +++++++++++++++++++ mm/khugepaged.c | 3 +++ mm/page_table_check.c | 20 ++++++++++++++++++++ 3 files changed, 42 insertions(+) (limited to 'include/linux') diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h index 38cace1da7b6..01e16c7696ec 100644 --- a/include/linux/page_table_check.h +++ b/include/linux/page_table_check.h @@ -26,6 +26,9 @@ void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr, pud_t *pudp, pud_t pud); +void __page_table_check_pte_clear_range(struct mm_struct *mm, + unsigned long addr, + pmd_t pmd); static inline void page_table_check_alloc(struct page *page, unsigned int order) { @@ -100,6 +103,16 @@ static inline void page_table_check_pud_set(struct mm_struct *mm, __page_table_check_pud_set(mm, addr, pudp, pud); } +static inline void page_table_check_pte_clear_range(struct mm_struct *mm, + unsigned long addr, + pmd_t pmd) +{ + if (static_branch_likely(&page_table_check_disabled)) + return; + + __page_table_check_pte_clear_range(mm, addr, pmd); +} + #else static inline void page_table_check_alloc(struct page *page, unsigned int order) @@ -143,5 +156,11 @@ static inline void page_table_check_pud_set(struct mm_struct *mm, { } +static inline void page_table_check_pte_clear_range(struct mm_struct *mm, + unsigned long addr, + pmd_t pmd) +{ +} + #endif /* CONFIG_PAGE_TABLE_CHECK */ #endif /* __LINUX_PAGE_TABLE_CHECK_H */ diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 30e59e4af272..131492fd1148 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -1422,10 +1423,12 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v spinlock_t *ptl; pmd_t pmd; + mmap_assert_write_locked(mm); ptl = pmd_lock(vma->vm_mm, pmdp); pmd = pmdp_collapse_flush(vma, addr, pmdp); spin_unlock(ptl); mm_dec_nr_ptes(mm); + page_table_check_pte_clear_range(mm, addr, pmd); pte_free(mm, pmd_pgtable(pmd)); } diff --git a/mm/page_table_check.c b/mm/page_table_check.c index c61d7ebe13b1..3763bd077861 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -247,3 +247,23 @@ void 
__page_table_check_pud_set(struct mm_struct *mm, unsigned long addr, } } EXPORT_SYMBOL(__page_table_check_pud_set); + +void __page_table_check_pte_clear_range(struct mm_struct *mm, + unsigned long addr, + pmd_t pmd) +{ + if (&init_mm == mm) + return; + + if (!pmd_bad(pmd) && !pmd_leaf(pmd)) { + pte_t *ptep = pte_offset_map(&pmd, addr); + unsigned long i; + + pte_unmap(ptep); + for (i = 0; i < PTRS_PER_PTE; i++) { + __page_table_check_pte_clear(mm, addr, *ptep); + addr += PAGE_SIZE; + ptep++; + } + } +} -- cgit v1.2.3-71-gd317 From 314c459a6fe0957b5885fbc65c53d51444092880 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Thu, 3 Feb 2022 20:49:29 -0800 Subject: mm/pgtable: define pte_index so that preprocessor could recognize it Since commit 974b9b2c68f3 ("mm: consolidate pte_index() and pte_offset_*() definitions") pte_index is a static inline and there is no define for it that can be recognized by the preprocessor. As a result, vm_insert_pages() uses slower loop over vm_insert_page() instead of insert_pages() that amortizes the cost of spinlock operations when inserting multiple pages. Link: https://lkml.kernel.org/r/20220111145457.20748-1-rppt@kernel.org Fixes: 974b9b2c68f3 ("mm: consolidate pte_index() and pte_offset_*() definitions") Signed-off-by: Mike Rapoport Reported-by: Christian Dietrich Reviewed-by: Khalid Aziz Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pgtable.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index bc8713a76e03..f4f4077b97aa 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -62,6 +62,7 @@ static inline unsigned long pte_index(unsigned long address) { return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); } +#define pte_index pte_index #ifndef pmd_index static inline unsigned long pmd_index(unsigned long address) -- cgit v1.2.3-71-gd317 From ae26508651272695a3ab353f75ab9a8daf3da324 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Sun, 23 Jan 2022 20:45:06 +0800 Subject: cpufreq: Move to_gov_attr_set() to cpufreq.h So it can be reused by other codes. Signed-off-by: Kevin Hao Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq_governor_attr_set.c | 5 ----- include/linux/cpufreq.h | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c index a6f365b9cc1a..771770ea0ed0 100644 --- a/drivers/cpufreq/cpufreq_governor_attr_set.c +++ b/drivers/cpufreq/cpufreq_governor_attr_set.c @@ -8,11 +8,6 @@ #include "cpufreq_governor.h" -static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj) -{ - return container_of(kobj, struct gov_attr_set, kobj); -} - static inline struct governor_attr *to_gov_attr(struct attribute *attr) { return container_of(attr, struct governor_attr, attr); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 1ab29e61b078..f0dfc0b260ec 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -658,6 +658,11 @@ struct gov_attr_set { /* sysfs ops for cpufreq governors */ extern const struct sysfs_ops governor_sysfs_ops; +static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj) +{ + return container_of(kobj, struct gov_attr_set, kobj); +} + void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node); void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node); unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node); -- cgit v1.2.3-71-gd317 From e70e13e7d4ab8f932f49db1c9500b30a34a6d420 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Fri, 4 Feb 2022 01:55:18 +0100 Subject: bpf: Implement bpf_core_types_are_compat(). Adopt libbpf's bpf_core_types_are_compat() for kernel duty by adding explicit recursion limit of 2 which is enough to handle 2 levels of function prototypes. 
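(For illustration only, not from the patch: a made-up pair of types showing what "2 levels of function prototypes" covers, i.e. a prototype whose argument is itself a pointer to a function prototype.)

typedef int (*filter_fn_t)(int value);			/* level 2: inner prototype */
int apply_filter(filter_fn_t fn, const int *values);	/* level 1: outer prototype */

Checking compatibility of two apply_filter-like prototypes recurses once into filter_fn_t, which fits within the MAX_TYPES_ARE_COMPAT_DEPTH budget of 2 used below.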
Signed-off-by: Matteo Croce Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220204005519.60361-2-mcroce@linux.microsoft.com --- include/linux/btf.h | 5 +++ kernel/bpf/btf.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 109 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/btf.h b/include/linux/btf.h index f6c43dd513fa..36bc09b8e890 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -327,6 +327,11 @@ static inline const struct btf_var_secinfo *btf_type_var_secinfo( return (const struct btf_var_secinfo *)(t + 1); } +static inline struct btf_param *btf_params(const struct btf_type *t) +{ + return (struct btf_param *)(t + 1); +} + #ifdef CONFIG_BPF_SYSCALL struct bpf_prog; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 9b47972c57a4..11740b300de9 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6798,10 +6798,113 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, } EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set); +#define MAX_TYPES_ARE_COMPAT_DEPTH 2 + +static +int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, + const struct btf *targ_btf, __u32 targ_id, + int level) +{ + const struct btf_type *local_type, *targ_type; + int depth = 32; /* max recursion depth */ + + /* caller made sure that names match (ignoring flavor suffix) */ + local_type = btf_type_by_id(local_btf, local_id); + targ_type = btf_type_by_id(targ_btf, targ_id); + if (btf_kind(local_type) != btf_kind(targ_type)) + return 0; + +recur: + depth--; + if (depth < 0) + return -EINVAL; + + local_type = btf_type_skip_modifiers(local_btf, local_id, &local_id); + targ_type = btf_type_skip_modifiers(targ_btf, targ_id, &targ_id); + if (!local_type || !targ_type) + return -EINVAL; + + if (btf_kind(local_type) != btf_kind(targ_type)) + return 0; + + switch (btf_kind(local_type)) { + case BTF_KIND_UNKN: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + return 1; + case BTF_KIND_INT: + /* just reject deprecated bitfield-like integers; all other + * integers are by default compatible between each other + */ + return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0; + case BTF_KIND_PTR: + local_id = local_type->type; + targ_id = targ_type->type; + goto recur; + case BTF_KIND_ARRAY: + local_id = btf_array(local_type)->type; + targ_id = btf_array(targ_type)->type; + goto recur; + case BTF_KIND_FUNC_PROTO: { + struct btf_param *local_p = btf_params(local_type); + struct btf_param *targ_p = btf_params(targ_type); + __u16 local_vlen = btf_vlen(local_type); + __u16 targ_vlen = btf_vlen(targ_type); + int i, err; + + if (local_vlen != targ_vlen) + return 0; + + for (i = 0; i < local_vlen; i++, local_p++, targ_p++) { + if (level <= 0) + return -EINVAL; + + btf_type_skip_modifiers(local_btf, local_p->type, &local_id); + btf_type_skip_modifiers(targ_btf, targ_p->type, &targ_id); + err = __bpf_core_types_are_compat(local_btf, local_id, + targ_btf, targ_id, + level - 1); + if (err <= 0) + return err; + } + + /* tail recurse for return type check */ + btf_type_skip_modifiers(local_btf, local_type->type, &local_id); + btf_type_skip_modifiers(targ_btf, targ_type->type, &targ_id); + goto recur; + } + default: + return 0; + } +} + +/* Check local and target types for compatibility. This check is used for + * type-based CO-RE relocations and follow slightly different rules than + * field-based relocations. 
This function assumes that root types were already + * checked for name match. Beyond that initial root-level name check, names + * are completely ignored. Compatibility rules are as follows: + * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but + * kind should match for local and target types (i.e., STRUCT is not + * compatible with UNION); + * - for ENUMs, the size is ignored; + * - for INT, size and signedness are ignored; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - CONST/VOLATILE/RESTRICT modifiers are ignored; + * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; + * - FUNC_PROTOs are compatible if they have compatible signature: same + * number of input args and compatible return and argument types. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + */ int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id) { - return -EOPNOTSUPP; + return __bpf_core_types_are_compat(local_btf, local_id, + targ_btf, targ_id, + MAX_TYPES_ARE_COMPAT_DEPTH); } static bool bpf_core_is_flavor_sep(const char *s) -- cgit v1.2.3-71-gd317 From 0463e320421b313db320bbcf2928ebf49f556b67 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Fri, 4 Feb 2022 11:42:10 +0000 Subject: net: phylink: remove phylink_set_10g_modes() phylink_set_10g_modes() is no longer used with the conversion of drivers to phylink_generic_validate(), so we can remove it. Signed-off-by: Russell King (Oracle) Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 11 ----------- include/linux/phylink.h | 1 - 2 files changed, 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 420201858564..5b53a3e23c89 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -132,17 +132,6 @@ void phylink_set_port_modes(unsigned long *mask) } EXPORT_SYMBOL_GPL(phylink_set_port_modes); -void phylink_set_10g_modes(unsigned long *mask) -{ - phylink_set(mask, 10000baseT_Full); - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseER_Full); -} -EXPORT_SYMBOL_GPL(phylink_set_10g_modes); - static int phylink_is_empty_linkmode(const unsigned long *linkmode) { __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, }; diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 713a0c928b7c..cca149f78d35 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -582,7 +582,6 @@ int phylink_speed_up(struct phylink *pl); #define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode) void phylink_set_port_modes(unsigned long *bits); -void phylink_set_10g_modes(unsigned long *mask); void phylink_helper_basex_speed(struct phylink_link_state *state); void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, -- cgit v1.2.3-71-gd317 From 145c7a793838add5e004e7d49a67654dc7eba147 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 Feb 2022 12:15:45 -0800 Subject: ipv6: make mc_forwarding atomic This fixes minor data-races in ip6_mc_input() and batadv_mcast_mla_rtr_flags_softif_get_ipv6() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/ipv6.h | 2 +- net/batman-adv/multicast.c | 2 +- net/ipv6/addrconf.c | 4 ++-- net/ipv6/ip6_input.c | 2 +- net/ipv6/ip6mr.c | 8 ++++---- 5 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 1e0f8a31f3de..16870f86c74d 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -51,7 +51,7 @@ struct ipv6_devconf { __s32 use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE - __s32 mc_forwarding; + atomic_t mc_forwarding; #endif __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index f4004cf0ff6f..9f311fddfaf9 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -134,7 +134,7 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev) { struct inet6_dev *in6_dev = __in6_dev_get(dev); - if (in6_dev && in6_dev->cnf.mc_forwarding) + if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding)) return BATADV_NO_FLAGS; else return BATADV_MCAST_WANT_NO_RTR6; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f927c199a93c..ff1b2484b8ed 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -554,7 +554,7 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, #ifdef CONFIG_IPV6_MROUTE if ((all || type == NETCONFA_MC_FORWARDING) && nla_put_s32(skb, NETCONFA_MC_FORWARDING, - devconf->mc_forwarding) < 0) + atomic_read(&devconf->mc_forwarding)) < 0) goto nla_put_failure; #endif if ((all || type == NETCONFA_PROXY_NEIGH) && @@ -5533,7 +5533,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE - array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; + array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding); #endif array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 80256717868e..d4b1e2c5aa76 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -508,7 +508,7 @@ int ip6_mc_input(struct sk_buff *skb) /* * IPv6 multicast router mode is now supported ;) */ - if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && + if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) && !(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 7cf73e60e619..541cd0887129 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -732,7 +732,7 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify, in6_dev = __in6_dev_get(dev); if (in6_dev) { - in6_dev->cnf.mc_forwarding--; + atomic_dec(&in6_dev->cnf.mc_forwarding); inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, dev->ifindex, &in6_dev->cnf); @@ -900,7 +900,7 @@ static int mif6_add(struct net *net, struct mr_table *mrt, in6_dev = __in6_dev_get(dev); if (in6_dev) { - in6_dev->cnf.mc_forwarding++; + atomic_inc(&in6_dev->cnf.mc_forwarding); inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, dev->ifindex, &in6_dev->cnf); @@ -1551,7 +1551,7 @@ static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk) } else { rcu_assign_pointer(mrt->mroute_sk, sk); sock_set_flag(sk, SOCK_RCU_FREE); - net->ipv6.devconf_all->mc_forwarding++; + atomic_inc(&net->ipv6.devconf_all->mc_forwarding); } 
write_unlock_bh(&mrt_lock); @@ -1584,7 +1584,7 @@ int ip6mr_sk_done(struct sock *sk) * so the RCU grace period before sk freeing * is guaranteed by sk_destruct() */ - net->ipv6.devconf_all->mc_forwarding--; + atomic_dec(&net->ipv6.devconf_all->mc_forwarding); write_unlock_bh(&mrt_lock); inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, -- cgit v1.2.3-71-gd317 From e3ececfe668facd87d920b608349a32607060e66 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 Feb 2022 14:42:35 -0800 Subject: ref_tracker: implement use-after-free detection Whenever ref_tracker_dir_init() is called, mark the struct ref_tracker_dir as dead. Test the dead status from ref_tracker_alloc() and ref_tracker_free() This should detect buggy dev_put()/dev_hold() happening too late in netdevice dismantle process. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/ref_tracker.h | 2 ++ lib/ref_tracker.c | 5 +++++ 2 files changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h index 60f3453be23e..a443abda937d 100644 --- a/include/linux/ref_tracker.h +++ b/include/linux/ref_tracker.h @@ -13,6 +13,7 @@ struct ref_tracker_dir { spinlock_t lock; unsigned int quarantine_avail; refcount_t untracked; + bool dead; struct list_head list; /* List of active trackers */ struct list_head quarantine; /* List of dead trackers */ #endif @@ -26,6 +27,7 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, INIT_LIST_HEAD(&dir->quarantine); spin_lock_init(&dir->lock); dir->quarantine_avail = quarantine_count; + dir->dead = false; refcount_set(&dir->untracked, 1); stack_depot_init(); } diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c index a6789c0c626b..32ff6bd497f8 100644 --- a/lib/ref_tracker.c +++ b/lib/ref_tracker.c @@ -20,6 +20,7 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir) unsigned long flags; bool leak = false; + dir->dead = true; spin_lock_irqsave(&dir->lock, flags); list_for_each_entry_safe(tracker, n, &dir->quarantine, head) { list_del(&tracker->head); @@ -72,6 +73,8 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir, gfp_t gfp_mask = gfp; unsigned long flags; + WARN_ON_ONCE(dir->dead); + if (gfp & __GFP_DIRECT_RECLAIM) gfp_mask |= __GFP_NOFAIL; *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask); @@ -100,6 +103,8 @@ int ref_tracker_free(struct ref_tracker_dir *dir, unsigned int nr_entries; unsigned long flags; + WARN_ON_ONCE(dir->dead); + if (!tracker) { refcount_dec(&dir->untracked); return -EEXIST; -- cgit v1.2.3-71-gd317 From 8fd5522f44dcd7f05454ddc4f16d0f821b676cd9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 Feb 2022 14:42:36 -0800 Subject: ref_tracker: add a count of untracked references We are still chasing a netdev refcount imbalance, and we suspect we have one rogue dev_put() that is consuming a reference taken from a dev_hold_track() To detect this case, allow ref_tracker_alloc() and ref_tracker_free() to be called with a NULL @trackerp parameter, and use a dedicated refcount_t just for them. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/ref_tracker.h | 2 ++ lib/ref_tracker.c | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h index a443abda937d..9ca353ab712b 100644 --- a/include/linux/ref_tracker.h +++ b/include/linux/ref_tracker.h @@ -13,6 +13,7 @@ struct ref_tracker_dir { spinlock_t lock; unsigned int quarantine_avail; refcount_t untracked; + refcount_t no_tracker; bool dead; struct list_head list; /* List of active trackers */ struct list_head quarantine; /* List of dead trackers */ @@ -29,6 +30,7 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, dir->quarantine_avail = quarantine_count; dir->dead = false; refcount_set(&dir->untracked, 1); + refcount_set(&dir->no_tracker, 1); stack_depot_init(); } diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c index 32ff6bd497f8..9c0c2e09df66 100644 --- a/lib/ref_tracker.c +++ b/lib/ref_tracker.c @@ -38,6 +38,7 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir) spin_unlock_irqrestore(&dir->lock, flags); WARN_ON_ONCE(leak); WARN_ON_ONCE(refcount_read(&dir->untracked) != 1); + WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1); } EXPORT_SYMBOL(ref_tracker_dir_exit); @@ -75,6 +76,10 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir, WARN_ON_ONCE(dir->dead); + if (!trackerp) { + refcount_inc(&dir->no_tracker); + return 0; + } if (gfp & __GFP_DIRECT_RECLAIM) gfp_mask |= __GFP_NOFAIL; *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask); @@ -98,13 +103,18 @@ int ref_tracker_free(struct ref_tracker_dir *dir, struct ref_tracker **trackerp) { unsigned long entries[REF_TRACKER_STACK_ENTRIES]; - struct ref_tracker *tracker = *trackerp; depot_stack_handle_t stack_handle; + struct ref_tracker *tracker; unsigned int nr_entries; unsigned long flags; WARN_ON_ONCE(dir->dead); + if (!trackerp) { + refcount_dec(&dir->no_tracker); + return 0; + } + tracker = *trackerp; if (!tracker) { refcount_dec(&dir->untracked); return -EEXIST; -- cgit v1.2.3-71-gd317 From 4c6c11ea0f7b00a1894803efe980dfaf3b074886 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 Feb 2022 14:42:37 -0800 Subject: net: refine dev_put()/dev_hold() debugging We are still chasing some syzbot reports where we think a rogue dev_put() is called with no corresponding prior dev_hold(). Unfortunately it eats a reference on dev->dev_refcnt taken by innocent dev_hold_track(), meaning that the refcount saturation splat comes too late to be useful. Make sure that 'not tracked' dev_put() and dev_hold() better use CONFIG_NET_DEV_REFCNT_TRACKER=y debug infrastructure: Prior patch in the series allowed ref_tracker_alloc() and ref_tracker_free() to be called with a NULL @trackerp parameter, and to use a separate refcount only to detect too many put() even in the following case: dev_hold_track(dev, tracker_1, GFP_ATOMIC); dev_hold(dev); dev_put(dev); dev_put(dev); // Should complain loudly here. dev_put_track(dev, tracker_1); // instead of here Add clarification about netdev_tracker_alloc() role. v2: I replaced the dev_put() in linkwatch_do_dev() with __dev_put() because callers called netdev_tracker_free(). Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 69 +++++++++++++++++++++++++++++++---------------- net/core/dev.c | 2 +- net/core/link_watch.c | 6 ++--- 3 files changed, 50 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e490b84732d1..3fb6fb67ed77 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3817,14 +3817,7 @@ extern unsigned int netdev_budget_usecs; /* Called by rtnetlink.c:rtnl_unlock() */ void netdev_run_todo(void); -/** - * dev_put - release reference to device - * @dev: network device - * - * Release reference to device to allow it to be freed. - * Try using dev_put_track() instead. - */ -static inline void dev_put(struct net_device *dev) +static inline void __dev_put(struct net_device *dev) { if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT @@ -3835,14 +3828,7 @@ static inline void dev_put(struct net_device *dev) } } -/** - * dev_hold - get reference to device - * @dev: network device - * - * Hold reference to device to keep it from being freed. - * Try using dev_hold_track() instead. - */ -static inline void dev_hold(struct net_device *dev) +static inline void __dev_hold(struct net_device *dev) { if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT @@ -3853,11 +3839,24 @@ static inline void dev_hold(struct net_device *dev) } } +static inline void __netdev_tracker_alloc(struct net_device *dev, + netdevice_tracker *tracker, + gfp_t gfp) +{ +#ifdef CONFIG_NET_DEV_REFCNT_TRACKER + ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); +#endif +} + +/* netdev_tracker_alloc() can upgrade a prior untracked reference + * taken by dev_get_by_name()/dev_get_by_index() to a tracked one. + */ static inline void netdev_tracker_alloc(struct net_device *dev, netdevice_tracker *tracker, gfp_t gfp) { #ifdef CONFIG_NET_DEV_REFCNT_TRACKER - ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); + refcount_dec(&dev->refcnt_tracker.no_tracker); + __netdev_tracker_alloc(dev, tracker, gfp); #endif } @@ -3873,8 +3872,8 @@ static inline void dev_hold_track(struct net_device *dev, netdevice_tracker *tracker, gfp_t gfp) { if (dev) { - dev_hold(dev); - netdev_tracker_alloc(dev, tracker, gfp); + __dev_hold(dev); + __netdev_tracker_alloc(dev, tracker, gfp); } } @@ -3883,10 +3882,34 @@ static inline void dev_put_track(struct net_device *dev, { if (dev) { netdev_tracker_free(dev, tracker); - dev_put(dev); + __dev_put(dev); } } +/** + * dev_hold - get reference to device + * @dev: network device + * + * Hold reference to device to keep it from being freed. + * Try using dev_hold_track() instead. + */ +static inline void dev_hold(struct net_device *dev) +{ + dev_hold_track(dev, NULL, GFP_ATOMIC); +} + +/** + * dev_put - release reference to device + * @dev: network device + * + * Release reference to device to allow it to be freed. + * Try using dev_put_track() instead. + */ +static inline void dev_put(struct net_device *dev) +{ + dev_put_track(dev, NULL); +} + static inline void dev_replace_track(struct net_device *odev, struct net_device *ndev, netdevice_tracker *tracker, @@ -3895,11 +3918,11 @@ static inline void dev_replace_track(struct net_device *odev, if (odev) netdev_tracker_free(odev, tracker); - dev_hold(ndev); - dev_put(odev); + __dev_hold(ndev); + __dev_put(odev); if (ndev) - netdev_tracker_alloc(ndev, tracker, gfp); + __netdev_tracker_alloc(ndev, tracker, gfp); } /* Carrier loss detection, dial on demand. 
The functions netif_carrier_on diff --git a/net/core/dev.c b/net/core/dev.c index f79744d99413..1eaa0b88e3ba 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10172,7 +10172,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev->pcpu_refcnt = alloc_percpu(int); if (!dev->pcpu_refcnt) goto free_dev; - dev_hold(dev); + __dev_hold(dev); #else refcount_set(&dev->dev_refcnt, 1); #endif diff --git a/net/core/link_watch.c b/net/core/link_watch.c index b0f5344d1185..95098d1a49bd 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -166,10 +166,10 @@ static void linkwatch_do_dev(struct net_device *dev) netdev_state_change(dev); } - /* Note: our callers are responsible for - * calling netdev_tracker_free(). + /* Note: our callers are responsible for calling netdev_tracker_free(). + * This is the reason we use __dev_put() instead of dev_put(). */ - dev_put(dev); + __dev_put(dev); } static void __linkwatch_run_queue(int urgent_only) -- cgit v1.2.3-71-gd317 From 5a8fb33e530512ee67a11b30f3451a4f030f4b01 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 Feb 2022 20:56:14 -0800 Subject: skmsg: convert struct sk_msg_sg::copy to a bitmap We have plans for increasing MAX_SKB_FRAGS, but sk_msg_sg::copy is currently an unsigned long, limiting MAX_SKB_FRAGS to 30 on 32bit arches. Convert it to a bitmap, as Jakub suggested. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/skmsg.h | 11 +++++------ net/core/filter.c | 4 ++-- 2 files changed, 7 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 18a717fe62eb..1ff68a88c58d 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -29,7 +29,7 @@ struct sk_msg_sg { u32 end; u32 size; u32 copybreak; - unsigned long copy; + DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2); /* The extra two elements: * 1) used for chaining the front and sections when the list becomes * partitioned (e.g. end < start). The crypto APIs require the @@ -38,7 +38,6 @@ struct sk_msg_sg { */ struct scatterlist data[MAX_MSG_FRAGS + 2]; }; -static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS); /* UAPI in filter.c depends on struct sk_msg_sg being first element. */ struct sk_msg { @@ -234,7 +233,7 @@ static inline void sk_msg_compute_data_pointers(struct sk_msg *msg) { struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start); - if (test_bit(msg->sg.start, &msg->sg.copy)) { + if (test_bit(msg->sg.start, msg->sg.copy)) { msg->data = NULL; msg->data_end = NULL; } else { @@ -253,7 +252,7 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page, sg_set_page(sge, page, len, offset); sg_unmark_end(sge); - __set_bit(msg->sg.end, &msg->sg.copy); + __set_bit(msg->sg.end, msg->sg.copy); msg->sg.size += len; sk_msg_iter_next(msg, end); } @@ -262,9 +261,9 @@ static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state) { do { if (copy_state) - __set_bit(i, &msg->sg.copy); + __set_bit(i, msg->sg.copy); else - __clear_bit(i, &msg->sg.copy); + __clear_bit(i, msg->sg.copy); sk_msg_iter_var_next(i); if (i == msg->sg.end) break; diff --git a/net/core/filter.c b/net/core/filter.c index 9615ae1ab530..f497ca7a16d2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2603,7 +2603,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, * account for the headroom. 
*/ bytes_sg_total = start - offset + bytes; - if (!test_bit(i, &msg->sg.copy) && bytes_sg_total <= len) + if (!test_bit(i, msg->sg.copy) && bytes_sg_total <= len) goto out; /* At this point we need to linearize multiple scatterlist @@ -2809,7 +2809,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, /* Place newly allocated data buffer */ sk_mem_charge(msg->sk, len); msg->sg.size += len; - __clear_bit(new, &msg->sg.copy); + __clear_bit(new, msg->sg.copy); sg_set_page(&msg->sg.data[new], page, len + copy, 0); if (rsge.length) { get_page(sg_page(&rsge)); -- cgit v1.2.3-71-gd317 From 88590b369354092183bcba04e2368010c462557f Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sat, 5 Feb 2022 15:47:33 +0800 Subject: net: skb_drop_reason: add documentation for drop reasons Add documentation for the following existing drop reasons: SKB_DROP_REASON_NOT_SPECIFIED SKB_DROP_REASON_NO_SOCKET SKB_DROP_REASON_PKT_TOO_SMALL SKB_DROP_REASON_TCP_CSUM SKB_DROP_REASON_SOCKET_FILTER SKB_DROP_REASON_UDP_CSUM Signed-off-by: Menglong Dong Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/skbuff.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a27bcc4f7e9a..f04e3a1f4455 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -314,12 +314,12 @@ struct sk_buff; * used to translate the reason to string. */ enum skb_drop_reason { - SKB_DROP_REASON_NOT_SPECIFIED, - SKB_DROP_REASON_NO_SOCKET, - SKB_DROP_REASON_PKT_TOO_SMALL, - SKB_DROP_REASON_TCP_CSUM, - SKB_DROP_REASON_SOCKET_FILTER, - SKB_DROP_REASON_UDP_CSUM, + SKB_DROP_REASON_NOT_SPECIFIED, /* drop reason is not specified */ + SKB_DROP_REASON_NO_SOCKET, /* socket not found */ + SKB_DROP_REASON_PKT_TOO_SMALL, /* packet size is too small */ + SKB_DROP_REASON_TCP_CSUM, /* TCP checksum error */ + SKB_DROP_REASON_SOCKET_FILTER, /* dropped by socket filter */ + SKB_DROP_REASON_UDP_CSUM, /* UDP checksum error */ SKB_DROP_REASON_MAX, }; -- cgit v1.2.3-71-gd317 From 2df3041ba3be950376e8c25a8f6da22f7fcc765c Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sat, 5 Feb 2022 15:47:34 +0800 Subject: net: netfilter: use kfree_skb_reason() for NF_DROP Replace kfree_skb() with kfree_skb_reason() in nf_hook_slow() when the skb is dropped by reason of NF_DROP. The following new drop reason is introduced: SKB_DROP_REASON_NETFILTER_DROP Signed-off-by: Menglong Dong Signed-off-by: David S.
Miller --- include/linux/skbuff.h | 1 + include/trace/events/skb.h | 1 + net/netfilter/core.c | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f04e3a1f4455..9060159b4375 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -320,6 +320,7 @@ enum skb_drop_reason { SKB_DROP_REASON_TCP_CSUM, /* TCP checksum error */ SKB_DROP_REASON_SOCKET_FILTER, /* dropped by socket filter */ SKB_DROP_REASON_UDP_CSUM, /* UDP checksum error */ + SKB_DROP_REASON_NETFILTER_DROP, /* dropped by netfilter */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index a8a64b97504d..3d89f7b09a43 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -16,6 +16,7 @@ EM(SKB_DROP_REASON_TCP_CSUM, TCP_CSUM) \ EM(SKB_DROP_REASON_SOCKET_FILTER, SOCKET_FILTER) \ EM(SKB_DROP_REASON_UDP_CSUM, UDP_CSUM) \ + EM(SKB_DROP_REASON_NETFILTER_DROP, NETFILTER_DROP) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 354cb472f386..d1c9dfbb11fa 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -621,7 +621,8 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, case NF_ACCEPT: break; case NF_DROP: - kfree_skb(skb); + kfree_skb_reason(skb, + SKB_DROP_REASON_NETFILTER_DROP); ret = NF_DROP_GETERR(verdict); if (ret == 0) ret = -EPERM; -- cgit v1.2.3-71-gd317 From 33cba42985c8144eef78d618fc1e51aaa074b169 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sat, 5 Feb 2022 15:47:35 +0800 Subject: net: ipv4: use kfree_skb_reason() in ip_rcv_core() Replace kfree_skb() with kfree_skb_reason() in ip_rcv_core(). Three new drop reasons are introduced: SKB_DROP_REASON_OTHERHOST SKB_DROP_REASON_IP_CSUM SKB_DROP_REASON_IP_INHDR Signed-off-by: Menglong Dong Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 9 +++++++++ include/trace/events/skb.h | 3 +++ net/ipv4/ip_input.c | 12 ++++++++++-- 3 files changed, 22 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9060159b4375..8e82130b3c52 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -321,6 +321,15 @@ enum skb_drop_reason { SKB_DROP_REASON_SOCKET_FILTER, /* dropped by socket filter */ SKB_DROP_REASON_UDP_CSUM, /* UDP checksum error */ SKB_DROP_REASON_NETFILTER_DROP, /* dropped by netfilter */ + SKB_DROP_REASON_OTHERHOST, /* packet don't belong to current + * host (interface is in promisc + * mode) + */ + SKB_DROP_REASON_IP_CSUM, /* IP checksum error */ + SKB_DROP_REASON_IP_INHDR, /* there is something wrong with + * IP header (see + * IPSTATS_MIB_INHDRERRORS) + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 3d89f7b09a43..f2b1778485f0 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -17,6 +17,9 @@ EM(SKB_DROP_REASON_SOCKET_FILTER, SOCKET_FILTER) \ EM(SKB_DROP_REASON_UDP_CSUM, UDP_CSUM) \ EM(SKB_DROP_REASON_NETFILTER_DROP, NETFILTER_DROP) \ + EM(SKB_DROP_REASON_OTHERHOST, OTHERHOST) \ + EM(SKB_DROP_REASON_IP_CSUM, IP_CSUM) \ + EM(SKB_DROP_REASON_IP_INHDR, IP_INHDR) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 3a025c011971..7be18de32e16 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -436,13 +436,16 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) { const struct iphdr *iph; + int drop_reason; u32 len; /* When the interface is in promisc. mode, drop all the crap * that it receives, do not try to analyse it. */ - if (skb->pkt_type == PACKET_OTHERHOST) + if (skb->pkt_type == PACKET_OTHERHOST) { + drop_reason = SKB_DROP_REASON_OTHERHOST; goto drop; + } __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len); @@ -452,6 +455,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) goto out; } + drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto inhdr_error; @@ -488,6 +492,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) len = ntohs(iph->tot_len); if (skb->len < len) { + drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL; __IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } else if (len < (iph->ihl*4)) @@ -516,11 +521,14 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) return skb; csum_error: + drop_reason = SKB_DROP_REASON_IP_CSUM; __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS); inhdr_error: + if (drop_reason == SKB_DROP_REASON_NOT_SPECIFIED) + drop_reason = SKB_DROP_REASON_IP_INHDR; __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); drop: - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); out: return NULL; } -- cgit v1.2.3-71-gd317 From c1f166d1f7eef212096a98b22f5acf92f9af353d Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sat, 5 Feb 2022 15:47:36 +0800 Subject: net: ipv4: use kfree_skb_reason() in ip_rcv_finish_core() Replace kfree_skb() with kfree_skb_reason() in ip_rcv_finish_core(), following drop reasons are introduced: SKB_DROP_REASON_IP_RPFILTER SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST Signed-off-by: Menglong Dong Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 9 +++++++++ include/trace/events/skb.h | 3 +++ net/ipv4/ip_input.c | 14 ++++++++++---- 3 files changed, 22 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8e82130b3c52..4baba45f223d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -330,6 +330,15 @@ enum skb_drop_reason { * IP header (see * IPSTATS_MIB_INHDRERRORS) */ + SKB_DROP_REASON_IP_RPFILTER, /* IP rpfilter validate failed. + * see the document for rp_filter + * in ip-sysctl.rst for more + * information + */ + SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, /* destination address of L2 + * is multicast, but L3 is + * unicast. + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index f2b1778485f0..485a1d3034a4 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -20,6 +20,9 @@ EM(SKB_DROP_REASON_OTHERHOST, OTHERHOST) \ EM(SKB_DROP_REASON_IP_CSUM, IP_CSUM) \ EM(SKB_DROP_REASON_IP_INHDR, IP_INHDR) \ + EM(SKB_DROP_REASON_IP_RPFILTER, IP_RPFILTER) \ + EM(SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, \ + UNICAST_IN_L2_MULTICAST) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 7be18de32e16..d5222c0fa87c 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -318,8 +318,10 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, { const struct iphdr *iph = ip_hdr(skb); int (*edemux)(struct sk_buff *skb); + int err, drop_reason; struct rtable *rt; - int err; + + drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (ip_can_use_hint(skb, iph, hint)) { err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos, @@ -396,19 +398,23 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, * so-called "hole-196" attack) so do it for both. */ if (in_dev && - IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST)) + IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST)) { + drop_reason = SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST; goto drop; + } } return NET_RX_SUCCESS; drop: - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); return NET_RX_DROP; drop_error: - if (err == -EXDEV) + if (err == -EXDEV) { + drop_reason = SKB_DROP_REASON_IP_RPFILTER; __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER); + } goto drop; } -- cgit v1.2.3-71-gd317 From 10580c4791902571777603cb980414892dd5f149 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sat, 5 Feb 2022 15:47:37 +0800 Subject: net: ipv4: use kfree_skb_reason() in ip_protocol_deliver_rcu() Replace kfree_skb() with kfree_skb_reason() in ip_protocol_deliver_rcu(). Following new drop reasons are introduced: SKB_DROP_REASON_XFRM_POLICY SKB_DROP_REASON_IP_NOPROTO Signed-off-by: Menglong Dong Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/skbuff.h | 2 ++ include/trace/events/skb.h | 2 ++ net/ipv4/ip_input.c | 5 +++-- 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4baba45f223d..2a64afa97910 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -339,6 +339,8 @@ enum skb_drop_reason { * is multicast, but L3 is * unicast. 
*/ + SKB_DROP_REASON_XFRM_POLICY, /* xfrm policy check failed */ + SKB_DROP_REASON_IP_NOPROTO, /* no support for IP protocol */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 485a1d3034a4..985e481c092d 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -23,6 +23,8 @@ EM(SKB_DROP_REASON_IP_RPFILTER, IP_RPFILTER) \ EM(SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, \ UNICAST_IN_L2_MULTICAST) \ + EM(SKB_DROP_REASON_XFRM_POLICY, XFRM_POLICY) \ + EM(SKB_DROP_REASON_IP_NOPROTO, IP_NOPROTO) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index d5222c0fa87c..d94f9f7e60c3 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -196,7 +196,8 @@ resubmit: if (ipprot) { if (!ipprot->no_policy) { if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { - kfree_skb(skb); + kfree_skb_reason(skb, + SKB_DROP_REASON_XFRM_POLICY); return; } nf_reset_ct(skb); @@ -215,7 +216,7 @@ resubmit: icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0); } - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_IP_NOPROTO); } else { __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS); consume_skb(skb); -- cgit v1.2.3-71-gd317 From 08d4c0370c400fa6ef2194f9ee2e8dccc4a7ab39 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sat, 5 Feb 2022 15:47:39 +0800 Subject: net: udp: use kfree_skb_reason() in __udp_queue_rcv_skb() Replace kfree_skb() with kfree_skb_reason() in __udp_queue_rcv_skb(). Following new drop reasons are introduced: SKB_DROP_REASON_SOCKET_RCVBUFF SKB_DROP_REASON_PROTO_MEM Signed-off-by: Menglong Dong Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/skbuff.h | 5 +++++ include/trace/events/skb.h | 2 ++ net/ipv4/udp.c | 10 +++++++--- 3 files changed, 14 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2a64afa97910..a5adbf6b51e8 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -341,6 +341,11 @@ enum skb_drop_reason { */ SKB_DROP_REASON_XFRM_POLICY, /* xfrm policy check failed */ SKB_DROP_REASON_IP_NOPROTO, /* no support for IP protocol */ + SKB_DROP_REASON_SOCKET_RCVBUFF, /* socket receive buff is full */ + SKB_DROP_REASON_PROTO_MEM, /* proto memory limition, such as + * udp packet drop out of + * udp_memory_allocated. 
+ */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 985e481c092d..cfcfd26399f7 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -25,6 +25,8 @@ UNICAST_IN_L2_MULTICAST) \ EM(SKB_DROP_REASON_XFRM_POLICY, XFRM_POLICY) \ EM(SKB_DROP_REASON_IP_NOPROTO, IP_NOPROTO) \ + EM(SKB_DROP_REASON_SOCKET_RCVBUFF, SOCKET_RCVBUFF) \ + EM(SKB_DROP_REASON_PROTO_MEM, PROTO_MEM) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 952f5bf108a5..6b4d8361560f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2093,16 +2093,20 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) rc = __udp_enqueue_schedule_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); + int drop_reason; /* Note that an ENOMEM error is charged twice */ - if (rc == -ENOMEM) + if (rc == -ENOMEM) { UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); + drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF; + } else { UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS, is_udplite); + drop_reason = SKB_DROP_REASON_PROTO_MEM; + } UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); trace_udp_fail_queue_rcv_skb(rc, sk); return -1; } -- cgit v1.2.3-71-gd317 From fda17afc6166e975bec1197bd94cd2a3317bce3f Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 7 Feb 2022 11:27:53 +0900 Subject: ata: libata-core: Fix ata_dev_config_cpr() The concurrent positioning ranges log page 47h is a general purpose log page and not a subpage of the identify device log. Using ata_identify_page_supported() to test for concurrent positioning ranges support is thus wrong. ata_log_supported() must be used. Furthermore, unlike other advanced ATA features (e.g. NCQ priority), accesses to the concurrent positioning ranges log page are not gated by a feature bit from the device IDENTIFY data. Since many older drives react badly to the READ LOG EXT and/or READ LOG DMA EXT commands issued to read device log pages, avoid problems with older drives by limiting the concurrent positioning ranges support detection to drives implementing at least the ACS-4 ATA standard (major version 11). This additional condition effectively turns ata_dev_config_cpr() into a nop for older drives, avoiding problems in the field. Fixes: fe22e1c2f705 ("libata: support concurrent positioning ranges log") BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=215519 Cc: stable@vger.kernel.org Reviewed-by: Hannes Reinecke Tested-by: Abderraouf Adjal Signed-off-by: Damien Le Moal --- drivers/ata/libata-core.c | 14 ++++++-------- include/linux/ata.h | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index e1b1dd215267..ba9273f80069 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2448,23 +2448,21 @@ static void ata_dev_config_cpr(struct ata_device *dev) struct ata_cpr_log *cpr_log = NULL; u8 *desc, *buf = NULL; - if (!ata_identify_page_supported(dev, - ATA_LOG_CONCURRENT_POSITIONING_RANGES)) + if (ata_id_major_version(dev->id) < 11 || + !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES)) goto out; /* - * Read IDENTIFY DEVICE data log, page 0x47 - * (concurrent positioning ranges). We can have at most 255 32B range - * descriptors plus a 64B header. + * Read the concurrent positioning ranges log (0x47).
We can have at + most 255 32B range descriptors plus a 64B header. */ buf_len = (64 + 255 * 32 + 511) & ~511; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) goto out; - err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, - ATA_LOG_CONCURRENT_POSITIONING_RANGES, - buf, buf_len >> 9); + err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES, + 0, buf, buf_len >> 9); if (err_mask) goto out; diff --git a/include/linux/ata.h b/include/linux/ata.h index 199e47e97d64..21292b5bbb55 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -324,12 +324,12 @@ enum { ATA_LOG_NCQ_NON_DATA = 0x12, ATA_LOG_NCQ_SEND_RECV = 0x13, ATA_LOG_IDENTIFY_DEVICE = 0x30, + ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47, /* Identify device log pages: */ ATA_LOG_SECURITY = 0x06, ATA_LOG_SATA_SETTINGS = 0x08, ATA_LOG_ZONED_INFORMATION = 0x09, - ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47, /* Identify device SATA settings log:*/ ATA_LOG_DEVSLP_OFFSET = 0x30, -- cgit v1.2.3-71-gd317 From ad5e35f58384179bbd3cdb349904e150f364c4f3 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 28 Jan 2022 12:34:14 +0100 Subject: mtd: Replace the expert mode symbols with a single helper Reduce the number of exported symbols by replacing: - mtd_expert_analysis_warning (the error string) - mtd_expert_analysis_mode (the boolean) with a single helper: - mtd_check_expert_analysis_mode Calling this helper will check/return the content of the internal boolean (which is not exported anymore) and conditionally WARN_ONCE() the user, as was done before. While at it, make the error string local to the helper and make it const. Only export this helper when CONFIG_DEBUG_FS is defined, to avoid growing the kernel size on production builds for what is only a debug feature. Mechanically update all the consumers. Suggested-by: Geert Uytterhoeven Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20220128113414.1121924-1-miquel.raynal@bootlin.com --- drivers/mtd/mtdcore.c | 23 +++++++++++++++-------- drivers/mtd/nand/core.c | 2 +- drivers/mtd/nand/raw/nand_base.c | 2 +- drivers/mtd/nand/raw/nand_bbt.c | 2 +- include/linux/mtd/mtd.h | 8 ++++++-- 5 files changed, 24 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 70f492dce158..1e7f3bbf847e 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -358,6 +358,21 @@ static int mtd_partname_debug_show(struct seq_file *s, void *p) DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug); +static bool mtd_expert_analysis_mode; + +#ifdef CONFIG_DEBUG_FS +bool mtd_check_expert_analysis_mode(void) +{ + const char *mtd_expert_analysis_warning = + "Bad block checks have been entirely disabled.\n" + "This is only reserved for post-mortem forensics and debug purposes.\n" + "Never enable this mode if you do not know what you are doing!\n"; + + return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning); +} +EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode); +#endif + static struct dentry *dfs_dir_mtd; static void mtd_debugfs_populate(struct mtd_info *mtd) @@ -2370,14 +2385,6 @@ static struct backing_dev_info * __init mtd_bdi_init(const char *name) return ret ?
ERR_PTR(ret) : bdi; } -char *mtd_expert_analysis_warning = - "Bad block checks have been entirely disabled.\n" - "This is only reserved for post-mortem forensics and debug purposes.\n" - "Never enable this mode if you do not know what you are doing!\n"; -EXPORT_SYMBOL_GPL(mtd_expert_analysis_warning); -bool mtd_expert_analysis_mode; -EXPORT_SYMBOL_GPL(mtd_expert_analysis_mode); - static struct proc_dir_entry *proc_mtd; static int __init init_mtd(void) diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c index 416947f28b67..3ff99a42a63f 100644 --- a/drivers/mtd/nand/core.c +++ b/drivers/mtd/nand/core.c @@ -21,7 +21,7 @@ */ bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos) { - if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning)) + if (mtd_check_expert_analysis_mode()) return false; if (nanddev_bbt_is_initialized(nand)) { diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index e7b2ba016d8c..068ecf979033 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -321,7 +321,7 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) if (nand_region_is_secured(chip, ofs, mtd->erasesize)) return -EIO; - if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning)) + if (mtd_check_expert_analysis_mode()) return 0; if (chip->legacy.block_bad) diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index ab630af3a309..a3723da2e0a0 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c @@ -1455,7 +1455,7 @@ int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt) pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n", (unsigned int)offs, block, res); - if (WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning)) + if (mtd_check_expert_analysis_mode()) return 0; switch (res) { diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 1ffa933121f6..1b3fc8c71ab3 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -711,7 +711,11 @@ static inline int mtd_is_bitflip_or_eccerr(int err) { unsigned mtd_mmap_capabilities(struct mtd_info *mtd); -extern char *mtd_expert_analysis_warning; -extern bool mtd_expert_analysis_mode; +#ifdef CONFIG_DEBUG_FS +bool mtd_check_expert_analysis_mode(void); +#else +static inline bool mtd_check_expert_analysis_mode(void) { return false; } +#endif + #endif /* __MTD_MTD_H__ */ -- cgit v1.2.3-71-gd317 From 6bf625a4140f24b490766043b307f8252519578b Mon Sep 17 00:00:00 2001 From: Michael Kelley Date: Sun, 6 Feb 2022 11:36:56 -0800 Subject: Drivers: hv: vmbus: Rework use of DMA_BIT_MASK(64) Using DMA_BIT_MASK(64) as an initializer for a global variable causes problems with Clang 12.0.1. The compiler doesn't understand that value 64 is excluded from the shift at compile time, resulting in a build error. While this is a compiler problem, avoid the issue by setting up the dma_mask memory as part of struct hv_device, and initialize it using dma_set_mask(). 
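To illustrate the shape of the fix, here is a minimal sketch of the per-device mask pattern (hypothetical names; the actual change to vmbus_device_register() is in the diff below):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	struct child_device {			/* stand-in for struct hv_device */
		struct device device;
		u64 dma_mask;			/* mask storage lives with the device */
	};

	static int child_register(struct child_device *child)
	{
		/* Point the generic dma_mask pointer at the embedded storage... */
		child->device.dma_mask = &child->dma_mask;
		/* ...and let dma_set_mask() evaluate DMA_BIT_MASK(64) at run time,
		 * instead of using it as a global initializer, which trips the
		 * Clang 12.0.1 build error described above.
		 */
		return dma_set_mask(&child->device, DMA_BIT_MASK(64));
	}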
Reported-by: Nathan Chancellor Reported-by: Vitaly Chikunov Reported-by: Jakub Kicinski Fixes: 743b237c3a7b ("scsi: storvsc: Add Isolation VM support for storvsc driver") Signed-off-by: Michael Kelley Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor Link: https://lore.kernel.org/r/1644176216-12531-1-git-send-email-mikelley@microsoft.com Signed-off-by: Wei Liu --- drivers/hv/vmbus_drv.c | 4 ++-- include/linux/hyperv.h | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 34a4fd21bdf5..12a2b37e87f3 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2082,7 +2082,6 @@ struct hv_device *vmbus_device_create(const guid_t *type, return child_device_obj; } -static u64 vmbus_dma_mask = DMA_BIT_MASK(64); /* * vmbus_device_register - Register the child device */ @@ -2123,8 +2122,9 @@ int vmbus_device_register(struct hv_device *child_device_obj) } hv_debug_add_dev_dir(child_device_obj); - child_device_obj->device.dma_mask = &vmbus_dma_mask; child_device_obj->device.dma_parms = &child_device_obj->dma_parms; + child_device_obj->device.dma_mask = &child_device_obj->dma_mask; + dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64)); return 0; err_kset_unregister: diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index f565a8938836..fe2e0179ed51 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1262,6 +1262,7 @@ struct hv_device { struct vmbus_channel *channel; struct kset *channels_kset; struct device_dma_parameters dma_parms; + u64 dma_mask; /* place holder to keep track of the dir for hv device in debugfs */ struct dentry *debug_dir; -- cgit v1.2.3-71-gd317 From cb1f65c1e1424a4b5e4a86da8aa3b8fd8459c8ec Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 4 Feb 2022 18:35:22 +0100 Subject: PM: s2idle: ACPI: Fix wakeup interrupts handling After commit e3728b50cd9b ("ACPI: PM: s2idle: Avoid possible race related to the EC GPE") wakeup interrupts occurring immediately after the one discarded by acpi_s2idle_wake() may be missed. Moreover, if the SCI triggers again immediately after the rearming in acpi_s2idle_wake(), that wakeup may be missed too. The problem is that pm_system_irq_wakeup() only calls pm_system_wakeup() when pm_wakeup_irq is 0, but that's not the case any more after the interrupt causing acpi_s2idle_wake() to run until pm_wakeup_irq is cleared by the pm_wakeup_clear() call in s2idle_loop(). However, there may be wakeup interrupts occurring in that time frame and if that happens, they will be missed. To address that issue first move the clearing of pm_wakeup_irq to the point at which it is known that the interrupt causing acpi_s2idle_wake() to run will be discarded, before rearming the SCI for wakeup. Moreover, because that only reduces the size of the time window in which the issue may manifest itself, allow pm_system_irq_wakeup() to register two wakeup interrupts in a row and, when discarding the first one, replace it with the second one. [Of course, this assumes that only one wakeup interrupt can be discarded in one go, but currently that is the case and I am not aware of any plans to change that.] Fixes: e3728b50cd9b ("ACPI: PM: s2idle: Avoid possible race related to the EC GPE") Cc: 5.4+ # 5.4+ Signed-off-by: Rafael J.
Wysocki --- drivers/acpi/sleep.c | 1 + drivers/base/power/wakeup.c | 41 ++++++++++++++++++++++++++++++++++------- include/linux/suspend.h | 4 ++-- kernel/power/main.c | 5 ++++- kernel/power/process.c | 2 +- kernel/power/suspend.c | 2 -- 6 files changed, 42 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index fac7c9d4c9a1..d4fbea91ab6b 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -758,6 +758,7 @@ bool acpi_s2idle_wake(void) return true; } + pm_wakeup_clear(acpi_sci_irq); rearm_wake_irq(acpi_sci_irq); } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 99bda0da23a8..8666590201c9 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state; bool events_check_enabled __read_mostly; /* First wakeup IRQ seen by the kernel in the last cycle. */ -unsigned int pm_wakeup_irq __read_mostly; +static unsigned int wakeup_irq[2] __read_mostly; +static DEFINE_RAW_SPINLOCK(wakeup_irq_lock); /* If greater than 0 and the system is suspending, terminate the suspend. */ static atomic_t pm_abort_suspend __read_mostly; @@ -942,19 +943,45 @@ void pm_system_cancel_wakeup(void) atomic_dec_if_positive(&pm_abort_suspend); } -void pm_wakeup_clear(bool reset) +void pm_wakeup_clear(unsigned int irq_number) { - pm_wakeup_irq = 0; - if (reset) + raw_spin_lock_irq(&wakeup_irq_lock); + + if (irq_number && wakeup_irq[0] == irq_number) + wakeup_irq[0] = wakeup_irq[1]; + else + wakeup_irq[0] = 0; + + wakeup_irq[1] = 0; + + raw_spin_unlock_irq(&wakeup_irq_lock); + + if (!irq_number) atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) { - if (pm_wakeup_irq == 0) { - pm_wakeup_irq = irq_number; + unsigned long flags; + + raw_spin_lock_irqsave(&wakeup_irq_lock, flags); + + if (wakeup_irq[0] == 0) + wakeup_irq[0] = irq_number; + else if (wakeup_irq[1] == 0) + wakeup_irq[1] = irq_number; + else + irq_number = 0; + + raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); + + if (irq_number) pm_system_wakeup(); - } +} + +unsigned int pm_wakeup_irq(void) +{ + return wakeup_irq[0]; } /** diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3e8ecdebe601..300273ff40cc 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -497,14 +497,14 @@ extern void ksys_sync_helper(void); /* drivers/base/power/wakeup.c */ extern bool events_check_enabled; -extern unsigned int pm_wakeup_irq; extern suspend_state_t pm_suspend_target_state; extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); extern void pm_system_cancel_wakeup(void); -extern void pm_wakeup_clear(bool reset); +extern void pm_wakeup_clear(unsigned int irq_number); extern void pm_system_irq_wakeup(unsigned int irq_number); +extern unsigned int pm_wakeup_irq(void); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); extern void pm_wakep_autosleep_enabled(bool set); diff --git a/kernel/power/main.c b/kernel/power/main.c index 44169f3081fd..7e646079fbeb 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -504,7 +504,10 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return pm_wakeup_irq ? 
sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA; + if (!pm_wakeup_irq()) + return -ENODATA; + + return sprintf(buf, "%u\n", pm_wakeup_irq()); } power_attr_ro(pm_wakeup_irq); diff --git a/kernel/power/process.c b/kernel/power/process.c index b7e7798637b8..11b570fcf049 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -134,7 +134,7 @@ int freeze_processes(void) if (!pm_freezing) atomic_inc(&system_freezing_cnt); - pm_wakeup_clear(true); + pm_wakeup_clear(0); pr_info("Freezing user space processes ... "); pm_freezing = true; error = try_to_freeze_tasks(true); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 80cc1f0f502b..6fcdee7e87a5 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -136,8 +136,6 @@ static void s2idle_loop(void) break; } - pm_wakeup_clear(false); - s2idle_enter(); } -- cgit v1.2.3-71-gd317 From ea4692c75e1c63926e4fb0728f5775ef0d733888 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 26 Jan 2022 01:39:41 -0800 Subject: lib/string_helpers: Consolidate string helpers implementation There are a few implementations of string helpers in the tree like yesno() that just returns "yes" or "no" depending on a boolean argument. Those are helpful to output strings to the user or log. In order to consolidate them, prefix all of them with str_ to make it clear what they are about and to avoid symbol clashes. Taking the common `val ? "yes" : "no"` implementation, quite a few users of open coded yesno() could later be converted to the new function: $ git grep '?\s*"yes"\s*' | wc -l 286 $ git grep '?\s*"no"\s*' | wc -l 20 The inlined functions should keep the const strings local to each compilation unit, the same way it is now, thus not changing the current behavior. Signed-off-by: Lucas De Marchi Reviewed-by: Andy Shevchenko Acked-by: Jani Nikula Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220126093951.1470898-2-lucas.demarchi@intel.com --- include/linux/string_helpers.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include/linux') diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index 7a22921c9db7..4d72258d42fd 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -106,4 +106,24 @@ void kfree_strarray(char **array, size_t n); char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n); +static inline const char *str_yes_no(bool v) +{ + return v ? "yes" : "no"; +} + +static inline const char *str_on_off(bool v) +{ + return v ? "on" : "off"; +} + +static inline const char *str_enable_disable(bool v) +{ + return v ? "enable" : "disable"; +} + +static inline const char *str_enabled_disabled(bool v) +{ + return v ? "enabled" : "disabled"; +} + #endif -- cgit v1.2.3-71-gd317 From 7938f4218168ae9fc4bdddb15976f9ebbae41999 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 4 Feb 2022 09:05:41 -0800 Subject: dma-buf-map: Rename to iosys-map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename struct dma_buf_map to struct iosys_map and corresponding APIs. Over time dma-buf-map grew to provide more functionality than what dma-buf itself uses: in fact it's just a shim layer to abstract system memory, that can be accessed via regular load and store, from IO memory that needs to be accessed via arch helpers. The idea is to extend this API so it can fulfill other needs, internal to a single driver.
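As a rough illustration of what the shim provides, consider this hedged sketch built only from helpers renamed in this patch (blit_row() is a hypothetical name, not part of the kernel):

	#include <linux/iosys-map.h>

	/* One copy path for both backing types: iosys_map_memcpy_to() uses
	 * memcpy() or memcpy_toio() depending on dst->is_iomem, so callers
	 * need no separate system-memory and I/O-memory branches.
	 */
	static void blit_row(struct iosys_map *dst, const void *src,
			     size_t len, size_t pitch)
	{
		iosys_map_memcpy_to(dst, src, len);	/* copy one row */
		iosys_map_incr(dst, pitch);		/* advance to the next row */
	}

The destination map is initialized once, with iosys_map_set_vaddr() for system memory or iosys_map_set_vaddr_iomem() for I/O memory.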
Example: in the i915 driver it's desired to share the implementation for integrated graphics, which uses mostly system memory, with discrete graphics, which may need to access IO memory. The conversion was mostly done with the following semantic patch: @r1@ @@ - struct dma_buf_map + struct iosys_map @r2@ @@ ( - DMA_BUF_MAP_INIT_VADDR + IOSYS_MAP_INIT_VADDR | - dma_buf_map_set_vaddr + iosys_map_set_vaddr | - dma_buf_map_set_vaddr_iomem + iosys_map_set_vaddr_iomem | - dma_buf_map_is_equal + iosys_map_is_equal | - dma_buf_map_is_null + iosys_map_is_null | - dma_buf_map_is_set + iosys_map_is_set | - dma_buf_map_clear + iosys_map_clear | - dma_buf_map_memcpy_to + iosys_map_memcpy_to | - dma_buf_map_incr + iosys_map_incr ) @@ @@ - #include + #include Then some files had their includes adjusted and some comments were update to remove mentions to dma-buf-map. Since this is not specific to dma-buf anymore, move the documentation to the "Bus-Independent Device Accesses" section. v2: - Squash patches v3: - Fix wrong removal of dma-buf.h from MAINTAINERS - Move documentation from dma-buf.rst to device-io.rst v4: - Change documentation title and level Signed-off-by: Lucas De Marchi Acked-by: Christian König Acked-by: Sumit Semwal Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20220204170541.829227-1-lucas.demarchi@intel.com --- Documentation/driver-api/device-io.rst | 9 + Documentation/driver-api/dma-buf.rst | 9 - Documentation/gpu/todo.rst | 20 +- MAINTAINERS | 9 +- drivers/dma-buf/dma-buf.c | 22 +- drivers/dma-buf/heaps/cma_heap.c | 10 +- drivers/dma-buf/heaps/system_heap.c | 10 +- drivers/gpu/drm/ast/ast_drv.h | 2 +- drivers/gpu/drm/ast/ast_mode.c | 8 +- drivers/gpu/drm/drm_cache.c | 18 +- drivers/gpu/drm/drm_client.c | 9 +- drivers/gpu/drm/drm_fb_helper.c | 12 +- drivers/gpu/drm/drm_gem.c | 12 +- drivers/gpu/drm/drm_gem_cma_helper.c | 9 +- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 16 +- drivers/gpu/drm/drm_gem_shmem_helper.c | 15 +- drivers/gpu/drm/drm_gem_ttm_helper.c | 4 +- drivers/gpu/drm/drm_gem_vram_helper.c | 25 +- drivers/gpu/drm/drm_internal.h | 6 +- drivers/gpu/drm/drm_mipi_dbi.c | 8 +- drivers/gpu/drm/drm_prime.c | 4 +- drivers/gpu/drm/etnaviv/etnaviv_drv.h | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 8 +- drivers/gpu/drm/gud/gud_pipe.c | 4 +- drivers/gpu/drm/hyperv/hyperv_drm_modeset.c | 5 +- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 8 +- .../gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 6 +- drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c | 6 +- drivers/gpu/drm/lima/lima_gem.c | 3 +- drivers/gpu/drm/lima/lima_sched.c | 4 +- drivers/gpu/drm/mediatek/mtk_drm_gem.c | 7 +- drivers/gpu/drm/mediatek/mtk_drm_gem.h | 5 +- drivers/gpu/drm/mgag200/mgag200_mode.c | 4 +- drivers/gpu/drm/msm/msm_drv.h | 4 +- drivers/gpu/drm/msm/msm_gem_prime.c | 6 +- drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 13 +- drivers/gpu/drm/qxl/qxl_display.c | 8 +- drivers/gpu/drm/qxl/qxl_draw.c | 6 +- drivers/gpu/drm/qxl/qxl_drv.h | 10 +- drivers/gpu/drm/qxl/qxl_object.c | 8 +- drivers/gpu/drm/qxl/qxl_object.h | 4 +- drivers/gpu/drm/qxl/qxl_prime.c | 4 +- drivers/gpu/drm/radeon/radeon_gem.c | 1 + drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 9 +- drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 5 +- drivers/gpu/drm/tegra/gem.c | 10 +- drivers/gpu/drm/tiny/cirrus.c | 8 +- drivers/gpu/drm/tiny/gm12u320.c | 7 +- drivers/gpu/drm/ttm/ttm_bo_util.c | 16 +- drivers/gpu/drm/ttm/ttm_resource.c | 42 ++-- drivers/gpu/drm/ttm/ttm_tt.c | 8 +- drivers/gpu/drm/udl/udl_modeset.c | 3 +- 
drivers/gpu/drm/vboxvideo/vbox_mode.c | 4 +- drivers/gpu/drm/vkms/vkms_composer.c | 4 +- drivers/gpu/drm/vkms/vkms_drv.h | 6 +- drivers/gpu/drm/vkms/vkms_plane.c | 2 +- drivers/gpu/drm/vkms/vkms_writeback.c | 2 +- drivers/gpu/drm/xen/xen_drm_front_gem.c | 7 +- drivers/gpu/drm/xen/xen_drm_front_gem.h | 6 +- .../media/common/videobuf2/videobuf2-dma-contig.c | 8 +- drivers/media/common/videobuf2/videobuf2-dma-sg.c | 9 +- drivers/media/common/videobuf2/videobuf2-vmalloc.c | 11 +- drivers/misc/fastrpc.c | 4 +- include/drm/drm_cache.h | 6 +- include/drm/drm_client.h | 7 +- include/drm/drm_gem.h | 6 +- include/drm/drm_gem_atomic_helper.h | 6 +- include/drm/drm_gem_cma_helper.h | 6 +- include/drm/drm_gem_framebuffer_helper.h | 8 +- include/drm/drm_gem_shmem_helper.h | 12 +- include/drm/drm_gem_ttm_helper.h | 6 +- include/drm/drm_gem_vram_helper.h | 9 +- include/drm/drm_prime.h | 6 +- include/drm/ttm/ttm_bo_api.h | 10 +- include/drm/ttm/ttm_kmap_iter.h | 10 +- include/drm/ttm/ttm_resource.h | 6 +- include/linux/dma-buf-map.h | 266 --------------------- include/linux/dma-buf.h | 12 +- include/linux/iosys-map.h | 257 ++++++++++++++++++++ 79 files changed, 599 insertions(+), 568 deletions(-) delete mode 100644 include/linux/dma-buf-map.h create mode 100644 include/linux/iosys-map.h (limited to 'include/linux') diff --git a/Documentation/driver-api/device-io.rst b/Documentation/driver-api/device-io.rst index e9f04b1815d1..4d2baac0311c 100644 --- a/Documentation/driver-api/device-io.rst +++ b/Documentation/driver-api/device-io.rst @@ -502,6 +502,15 @@ pcim_iomap() Not using these wrappers may make drivers unusable on certain platforms with stricter rules for mapping I/O memory. +Generalizing Access to System and I/O Memory +============================================ + +.. kernel-doc:: include/linux/iosys-map.h + :doc: overview + +.. kernel-doc:: include/linux/iosys-map.h + :internal: + Public Functions Provided ========================= diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index 2cd7db82d9fe..55006678394a 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -128,15 +128,6 @@ Kernel Functions and Structures Reference .. kernel-doc:: include/linux/dma-buf.h :internal: -Buffer Mapping Helpers -~~~~~~~~~~~~~~~~~~~~~~ - -.. kernel-doc:: include/linux/dma-buf-map.h - :doc: overview - -.. kernel-doc:: include/linux/dma-buf-map.h - :internal: - Reservation Objects ------------------- diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index da138dd39883..c8f39c1ef1ee 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -222,7 +222,7 @@ Convert drivers to use drm_fbdev_generic_setup() Most drivers can use drm_fbdev_generic_setup(). Driver have to implement atomic modesetting and GEM vmap support. Historically, generic fbdev emulation expected the framebuffer in system memory or system-like memory. By employing -struct dma_buf_map, drivers with frambuffers in I/O memory can be supported +struct iosys_map, drivers with frambuffers in I/O memory can be supported as well. Contact: Maintainer of the driver you plan to convert @@ -234,7 +234,7 @@ Reimplement functions in drm_fbdev_fb_ops without fbdev A number of callback functions in drm_fbdev_fb_ops could benefit from being rewritten without dependencies on the fbdev module. 
Some of the -helpers could further benefit from using struct dma_buf_map instead of +helpers could further benefit from using struct iosys_map instead of raw pointers. Contact: Thomas Zimmermann , Daniel Vetter @@ -434,19 +434,19 @@ Contact: Emil Velikov, respective driver maintainers Level: Intermediate -Use struct dma_buf_map throughout codebase ------------------------------------------- +Use struct iosys_map throughout codebase +---------------------------------------- -Pointers to shared device memory are stored in struct dma_buf_map. Each +Pointers to shared device memory are stored in struct iosys_map. Each instance knows whether it refers to system or I/O memory. Most of the DRM-wide -interface have been converted to use struct dma_buf_map, but implementations +interface have been converted to use struct iosys_map, but implementations often still use raw pointers. -The task is to use struct dma_buf_map where it makes sense. +The task is to use struct iosys_map where it makes sense. -* Memory managers should use struct dma_buf_map for dma-buf-imported buffers. -* TTM might benefit from using struct dma_buf_map internally. -* Framebuffer copying and blitting helpers should operate on struct dma_buf_map. +* Memory managers should use struct iosys_map for dma-buf-imported buffers. +* TTM might benefit from using struct iosys_map internally. +* Framebuffer copying and blitting helpers should operate on struct iosys_map. Contact: Thomas Zimmermann , Christian König, Daniel Vetter diff --git a/MAINTAINERS b/MAINTAINERS index f41088418aae..1a18eafee497 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5734,7 +5734,7 @@ T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/driver-api/dma-buf.rst F: drivers/dma-buf/ F: include/linux/*fence.h -F: include/linux/dma-buf* +F: include/linux/dma-buf.h F: include/linux/dma-resv.h K: \bdma_(?:buf|fence|resv)\b @@ -10050,6 +10050,13 @@ F: include/linux/iova.h F: include/linux/of_iommu.h F: include/uapi/linux/iommu.h +IOSYS-MAP HELPERS +M: Thomas Zimmermann +L: dri-devel@lists.freedesktop.org +S: Maintained +T: git git://anongit.freedesktop.org/drm/drm-misc +F: include/linux/iosys-map.h + IO_URING M: Jens Axboe R: Pavel Begunkov diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 602b12d7470d..df23239b04fc 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1047,8 +1047,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); * * Interfaces:: * - * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map) - * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map) + * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map) + * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map) * * The vmap call can fail if there is no vmap support in the exporter, or if * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference @@ -1260,12 +1260,12 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); * * Returns 0 on success, or a negative errno code otherwise. 
*/ -int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { - struct dma_buf_map ptr; + struct iosys_map ptr; int ret = 0; - dma_buf_map_clear(map); + iosys_map_clear(map); if (WARN_ON(!dmabuf)) return -EINVAL; @@ -1276,12 +1276,12 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) mutex_lock(&dmabuf->lock); if (dmabuf->vmapping_counter) { dmabuf->vmapping_counter++; - BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr)); + BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); *map = dmabuf->vmap_ptr; goto out_unlock; } - BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr)); + BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr)); ret = dmabuf->ops->vmap(dmabuf, &ptr); if (WARN_ON_ONCE(ret)) @@ -1303,20 +1303,20 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF); * @dmabuf: [in] buffer to vunmap * @map: [in] vmap pointer to vunmap */ -void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { if (WARN_ON(!dmabuf)) return; - BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr)); + BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); BUG_ON(dmabuf->vmapping_counter == 0); - BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map)); + BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map)); mutex_lock(&dmabuf->lock); if (--dmabuf->vmapping_counter == 0) { if (dmabuf->ops->vunmap) dmabuf->ops->vunmap(dmabuf, map); - dma_buf_map_clear(&dmabuf->vmap_ptr); + iosys_map_clear(&dmabuf->vmap_ptr); } mutex_unlock(&dmabuf->lock); } diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 83f02bd51dda..28fb04eccdd0 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -202,7 +202,7 @@ static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) return vaddr; } -static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct cma_heap_buffer *buffer = dmabuf->priv; void *vaddr; @@ -211,7 +211,7 @@ static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) mutex_lock(&buffer->lock); if (buffer->vmap_cnt) { buffer->vmap_cnt++; - dma_buf_map_set_vaddr(map, buffer->vaddr); + iosys_map_set_vaddr(map, buffer->vaddr); goto out; } @@ -222,14 +222,14 @@ static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) } buffer->vaddr = vaddr; buffer->vmap_cnt++; - dma_buf_map_set_vaddr(map, buffer->vaddr); + iosys_map_set_vaddr(map, buffer->vaddr); out: mutex_unlock(&buffer->lock); return ret; } -static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct cma_heap_buffer *buffer = dmabuf->priv; @@ -239,7 +239,7 @@ static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) buffer->vaddr = NULL; } mutex_unlock(&buffer->lock); - dma_buf_map_clear(map); + iosys_map_clear(map); } static void cma_heap_dma_buf_release(struct dma_buf *dmabuf) diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index ab7fd896d2c4..fcf836ba9c1f 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -241,7 +241,7 @@ static void *system_heap_do_vmap(struct system_heap_buffer *buffer) return vaddr; } -static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct 
system_heap_buffer *buffer = dmabuf->priv; void *vaddr; @@ -250,7 +250,7 @@ static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) mutex_lock(&buffer->lock); if (buffer->vmap_cnt) { buffer->vmap_cnt++; - dma_buf_map_set_vaddr(map, buffer->vaddr); + iosys_map_set_vaddr(map, buffer->vaddr); goto out; } @@ -262,14 +262,14 @@ static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) buffer->vaddr = vaddr; buffer->vmap_cnt++; - dma_buf_map_set_vaddr(map, buffer->vaddr); + iosys_map_set_vaddr(map, buffer->vaddr); out: mutex_unlock(&buffer->lock); return ret; } -static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct system_heap_buffer *buffer = dmabuf->priv; @@ -279,7 +279,7 @@ static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) buffer->vaddr = NULL; } mutex_unlock(&buffer->lock); - dma_buf_map_clear(map); + iosys_map_clear(map); } static void system_heap_dma_buf_release(struct dma_buf *dmabuf) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 00bfa41ff7cb..9c8d56b0a41b 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -107,7 +107,7 @@ struct ast_cursor_plane { struct { struct drm_gem_vram_object *gbo; - struct dma_buf_map map; + struct iosys_map map; u64 off; } hwc[AST_DEFAULT_HWC_NUM]; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 956c8982192b..400845d4144c 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -801,11 +801,11 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane, struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state); struct drm_framebuffer *fb = new_state->fb; struct ast_private *ast = to_ast_private(plane->dev); - struct dma_buf_map dst_map = + struct iosys_map dst_map = ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].map; u64 dst_off = ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].off; - struct dma_buf_map src_map = shadow_plane_state->data[0]; + struct iosys_map src_map = shadow_plane_state->data[0]; unsigned int offset_x, offset_y; u16 x, y; u8 x_offset, y_offset; @@ -883,7 +883,7 @@ static void ast_cursor_plane_destroy(struct drm_plane *plane) struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane); size_t i; struct drm_gem_vram_object *gbo; - struct dma_buf_map map; + struct iosys_map map; for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) { gbo = ast_cursor_plane->hwc[i].gbo; @@ -910,7 +910,7 @@ static int ast_cursor_plane_init(struct ast_private *ast) struct drm_plane *cursor_plane = &ast_cursor_plane->base; size_t size, i; struct drm_gem_vram_object *gbo; - struct dma_buf_map map; + struct iosys_map map; int ret; s64 off; diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index f19d9acbe959..4bb093ccf1b8 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -28,10 +28,10 @@ * Authors: Thomas Hellström */ -#include +#include #include #include -#include +#include #include #include @@ -214,14 +214,14 @@ bool drm_need_swiotlb(int dma_bits) } EXPORT_SYMBOL(drm_need_swiotlb); -static void memcpy_fallback(struct dma_buf_map *dst, - const struct dma_buf_map *src, +static void memcpy_fallback(struct iosys_map *dst, + const struct iosys_map *src, unsigned long len) { if (!dst->is_iomem && !src->is_iomem) { memcpy(dst->vaddr, src->vaddr, len); } 
else if (!src->is_iomem) { - dma_buf_map_memcpy_to(dst, src->vaddr, len); + iosys_map_memcpy_to(dst, src->vaddr, len); } else if (!dst->is_iomem) { memcpy_fromio(dst->vaddr, src->vaddr_iomem, len); } else { @@ -305,8 +305,8 @@ static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len) * Tries an arch optimized memcpy for prefetching reading out of a WC region, * and if no such beast is available, falls back to a normal memcpy. */ -void drm_memcpy_from_wc(struct dma_buf_map *dst, - const struct dma_buf_map *src, +void drm_memcpy_from_wc(struct iosys_map *dst, + const struct iosys_map *src, unsigned long len) { if (WARN_ON(in_interrupt())) { @@ -343,8 +343,8 @@ void drm_memcpy_init_early(void) static_branch_enable(&has_movntdqa); } #else -void drm_memcpy_from_wc(struct dma_buf_map *dst, - const struct dma_buf_map *src, +void drm_memcpy_from_wc(struct iosys_map *dst, + const struct iosys_map *src, unsigned long len) { WARN_ON(in_interrupt()); diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index ce45e380f4a2..af3b7395bf69 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -3,7 +3,7 @@ * Copyright 2018 Noralf Trønnes */ -#include +#include #include #include #include @@ -309,9 +309,10 @@ err_delete: * 0 on success, or a negative errno code otherwise. */ int -drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map_copy) +drm_client_buffer_vmap(struct drm_client_buffer *buffer, + struct iosys_map *map_copy) { - struct dma_buf_map *map = &buffer->map; + struct iosys_map *map = &buffer->map; int ret; /* @@ -342,7 +343,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap); */ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { - struct dma_buf_map *map = &buffer->map; + struct iosys_map *map = &buffer->map; drm_gem_vunmap(buffer->gem, map); } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index ed43b987d306..e9a9d35fbf5e 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -373,7 +373,7 @@ static void drm_fb_helper_resume_worker(struct work_struct *work) static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, struct drm_clip_rect *clip, - struct dma_buf_map *dst) + struct iosys_map *dst) { struct drm_framebuffer *fb = fb_helper->fb; unsigned int cpp = fb->format->cpp[0]; @@ -382,11 +382,11 @@ static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, size_t len = (clip->x2 - clip->x1) * cpp; unsigned int y; - dma_buf_map_incr(dst, offset); /* go to first pixel within clip rect */ + iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ for (y = clip->y1; y < clip->y2; y++) { - dma_buf_map_memcpy_to(dst, src, len); - dma_buf_map_incr(dst, fb->pitches[0]); + iosys_map_memcpy_to(dst, src, len); + iosys_map_incr(dst, fb->pitches[0]); src += fb->pitches[0]; } } @@ -395,7 +395,7 @@ static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper, struct drm_clip_rect *clip) { struct drm_client_buffer *buffer = fb_helper->buffer; - struct dma_buf_map map, dst; + struct iosys_map map, dst; int ret; /* @@ -2322,7 +2322,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, struct drm_framebuffer *fb; struct fb_info *fbi; u32 format; - struct dma_buf_map map; + struct iosys_map map; int ret; drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 4dcdec6487bb..8c7b24f4b0e4 100644 --- 
a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include @@ -1165,7 +1165,7 @@ void drm_gem_unpin(struct drm_gem_object *obj) obj->funcs->unpin(obj); } -int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) { int ret; @@ -1175,23 +1175,23 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) ret = obj->funcs->vmap(obj, map); if (ret) return ret; - else if (dma_buf_map_is_null(map)) + else if (iosys_map_is_null(map)) return -ENOMEM; return 0; } EXPORT_SYMBOL(drm_gem_vmap); -void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { - if (dma_buf_map_is_null(map)) + if (iosys_map_is_null(map)) return; if (obj->funcs->vunmap) obj->funcs->vunmap(obj, map); /* Always set the mapping to NULL. Callers may rely on this. */ - dma_buf_map_clear(map); + iosys_map_clear(map); } EXPORT_SYMBOL(drm_gem_vunmap); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index cefd0cbf9deb..88c432a7cb3c 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -209,7 +209,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj) { struct drm_gem_object *gem_obj = &cma_obj->base; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(cma_obj->vaddr); if (gem_obj->import_attach) { if (cma_obj->vaddr) @@ -480,9 +480,10 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table); * Returns: * 0 on success, or a negative error code otherwise. */ -int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map) +int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, + struct iosys_map *map) { - dma_buf_map_set_vaddr(map, cma_obj->vaddr); + iosys_map_set_vaddr(map, cma_obj->vaddr); return 0; } @@ -557,7 +558,7 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev, { struct drm_gem_cma_object *cma_obj; struct drm_gem_object *obj; - struct dma_buf_map map; + struct iosys_map map; int ret; ret = dma_buf_vmap(attach->dmabuf, &map); diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 746fd8c73845..f4619803acd0 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty); * @data: returns the data address for each BO, can be NULL * * This function maps all buffer objects of the given framebuffer into - * kernel address space and stores them in struct dma_buf_map. If the + * kernel address space and stores them in struct iosys_map. If the * mapping operation fails for one of the BOs, the function unmaps the * already established mappings automatically. * @@ -335,8 +335,8 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty); * 0 on success, or a negative errno code otherwise. 
*/ int drm_gem_fb_vmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES], - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]) + struct iosys_map map[static DRM_FORMAT_MAX_PLANES], + struct iosys_map data[DRM_FORMAT_MAX_PLANES]) { struct drm_gem_object *obj; unsigned int i; @@ -345,7 +345,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, for (i = 0; i < DRM_FORMAT_MAX_PLANES; ++i) { obj = drm_gem_fb_get_obj(fb, i); if (!obj) { - dma_buf_map_clear(&map[i]); + iosys_map_clear(&map[i]); continue; } ret = drm_gem_vmap(obj, &map[i]); @@ -356,9 +356,9 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, if (data) { for (i = 0; i < DRM_FORMAT_MAX_PLANES; ++i) { memcpy(&data[i], &map[i], sizeof(data[i])); - if (dma_buf_map_is_null(&data[i])) + if (iosys_map_is_null(&data[i])) continue; - dma_buf_map_incr(&data[i], fb->offsets[i]); + iosys_map_incr(&data[i], fb->offsets[i]); } } @@ -386,7 +386,7 @@ EXPORT_SYMBOL(drm_gem_fb_vmap); * See drm_gem_fb_vmap() for more information. */ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES]) + struct iosys_map map[static DRM_FORMAT_MAX_PLANES]) { unsigned int i = DRM_FORMAT_MAX_PLANES; struct drm_gem_object *obj; @@ -396,7 +396,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, obj = drm_gem_fb_get_obj(fb, i); if (!obj) continue; - if (dma_buf_map_is_null(&map[i])) + if (iosys_map_is_null(&map[i])) continue; drm_gem_vunmap(obj, &map[i]); } diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 621924116eb4..3e738aea2664 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -286,13 +286,14 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) } EXPORT_SYMBOL(drm_gem_shmem_unpin); -static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) +static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; int ret = 0; if (shmem->vmap_use_count++ > 0) { - dma_buf_map_set_vaddr(map, shmem->vaddr); + iosys_map_set_vaddr(map, shmem->vaddr); return 0; } @@ -319,7 +320,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct if (!shmem->vaddr) ret = -ENOMEM; else - dma_buf_map_set_vaddr(map, shmem->vaddr); + iosys_map_set_vaddr(map, shmem->vaddr); } if (ret) { @@ -353,7 +354,8 @@ err_zero_use: * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { int ret; @@ -368,7 +370,7 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *m EXPORT_SYMBOL(drm_gem_shmem_vmap); static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, - struct dma_buf_map *map) + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; @@ -400,7 +402,8 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, * This function hides the differences between dma-buf imported and natively * allocated objects. 
*/ -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { mutex_lock(&shmem->vmap_lock); drm_gem_shmem_vunmap_locked(shmem, map); diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index ecf3d2a54a98..d5962a34c01d 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -61,7 +61,7 @@ EXPORT_SYMBOL(drm_gem_ttm_print_info); * 0 on success, or a negative errno code otherwise. */ int drm_gem_ttm_vmap(struct drm_gem_object *gem, - struct dma_buf_map *map) + struct iosys_map *map) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); @@ -78,7 +78,7 @@ EXPORT_SYMBOL(drm_gem_ttm_vmap); * &drm_gem_object_funcs.vmap callback. */ void drm_gem_ttm_vunmap(struct drm_gem_object *gem, - struct dma_buf_map *map) + struct iosys_map *map) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 3f00192215d1..dc7f938bfff2 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include +#include #include #include @@ -116,7 +116,7 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) */ WARN_ON(gbo->vmap_use_count); - WARN_ON(dma_buf_map_is_set(&gbo->map)); + WARN_ON(iosys_map_is_set(&gbo->map)); drm_gem_object_release(&gbo->bo.base); } @@ -365,7 +365,7 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) EXPORT_SYMBOL(drm_gem_vram_unpin); static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, - struct dma_buf_map *map) + struct iosys_map *map) { int ret; @@ -377,7 +377,7 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, * page mapping might still be around. Only vmap if the there's * no mapping present. */ - if (dma_buf_map_is_null(&gbo->map)) { + if (iosys_map_is_null(&gbo->map)) { ret = ttm_bo_vmap(&gbo->bo, &gbo->map); if (ret) return ret; @@ -391,14 +391,14 @@ out: } static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, - struct dma_buf_map *map) + struct iosys_map *map) { struct drm_device *dev = gbo->bo.base.dev; if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count)) return; - if (drm_WARN_ON_ONCE(dev, !dma_buf_map_is_equal(&gbo->map, map))) + if (drm_WARN_ON_ONCE(dev, !iosys_map_is_equal(&gbo->map, map))) return; /* BUG: map not mapped from this BO */ if (--gbo->vmap_use_count > 0) @@ -428,7 +428,7 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, * Returns: * 0 on success, or a negative error code otherwise. */ -int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map) { int ret; @@ -463,7 +463,8 @@ EXPORT_SYMBOL(drm_gem_vram_vmap); * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See * the documentation for drm_gem_vram_vmap() for more information. 
*/ -void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, + struct iosys_map *map) { int ret; @@ -567,7 +568,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo) return; ttm_bo_vunmap(bo, &gbo->map); - dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */ + iosys_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */ } static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, @@ -802,7 +803,8 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem) * Returns: * 0 on success, or a negative error code otherwise. */ -static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_map *map) +static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, + struct iosys_map *map) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); @@ -815,7 +817,8 @@ static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_m * @gem: The GEM object to unmap * @map: Kernel virtual address where the VRAM GEM object was mapped */ -static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, struct dma_buf_map *map) +static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, + struct iosys_map *map) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 17f3548c8ed2..1fbbc19f1ac0 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -33,7 +33,7 @@ struct dentry; struct dma_buf; -struct dma_buf_map; +struct iosys_map; struct drm_connector; struct drm_crtc; struct drm_framebuffer; @@ -174,8 +174,8 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, int drm_gem_pin(struct drm_gem_object *obj); void drm_gem_unpin(struct drm_gem_object *obj); -int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, u32 handle); diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 0327d595e028..9314f2ead79f 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -201,8 +201,8 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, struct drm_rect *clip, bool swap) { struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; void *src; int ret; @@ -258,8 +258,8 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev, static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); unsigned int height = rect->y2 - rect->y1; unsigned int width = rect->x2 - rect->x1; diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index c773d3dfb1ab..e3f09f18110c 100644 --- a/drivers/gpu/drm/drm_prime.c +++ 
b/drivers/gpu/drm/drm_prime.c @@ -674,7 +674,7 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf); * * Returns 0 on success or a negative errno code otherwise. */ -int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; @@ -690,7 +690,7 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap); * Releases a kernel virtual mapping. This can be used as the * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling. */ -void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index 049ae87de9be..f32f4771dada 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -49,7 +49,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); -int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int etnaviv_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 6788ea8490d1..3fa2da149639 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -25,14 +25,14 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages); } -int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { void *vaddr; vaddr = etnaviv_gem_vmap(obj); if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } @@ -62,7 +62,7 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj) static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) { - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(etnaviv_obj->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr); if (etnaviv_obj->vaddr) dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map); @@ -77,7 +77,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) { - struct dma_buf_map map; + struct iosys_map map; int ret; lockdep_assert_held(&etnaviv_obj->lock); diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index a150a5a4b5d4..4873f9799f41 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -152,8 +152,8 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb, { struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach; u8 compression = gdrm->compression; - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; - struct dma_buf_map map_data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map 
map_data[DRM_FORMAT_MAX_PLANES]; void *vaddr, *buf; size_t pitch, len; int ret = 0; diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c index 93f51e70a951..e82b815f83a6 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c @@ -19,7 +19,7 @@ #include "hyperv_drm.h" static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb, - const struct dma_buf_map *map, + const struct iosys_map *map, struct drm_rect *rect) { struct hyperv_drm_device *hv = to_hv(fb->dev); @@ -38,7 +38,8 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb, return 0; } -static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map) +static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb, + const struct iosys_map *map) { struct drm_rect fullscreen = { .x1 = 0, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 1b526039a60d..14fdb0796c52 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -74,7 +74,8 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, kfree(sg); } -static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, + struct iosys_map *map) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); void *vaddr; @@ -83,12 +84,13 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map if (IS_ERR(vaddr)) return PTR_ERR(vaddr); - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } -static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, + struct iosys_map *map) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index 3cc74b0fed06..b071a58dd6da 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -266,7 +266,7 @@ static int igt_dmabuf_import(void *arg) struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; void *obj_map, *dma_map; - struct dma_buf_map map; + struct iosys_map map; u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff }; int err, i; @@ -349,7 +349,7 @@ static int igt_dmabuf_import_ownership(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; - struct dma_buf_map map; + struct iosys_map map; void *ptr; int err; @@ -400,7 +400,7 @@ static int igt_dmabuf_export_vmap(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; - struct dma_buf_map map; + struct iosys_map map; void *ptr; int err; diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c index 2855d11c7a51..b2a5882b8f81 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c @@ -61,7 +61,7 @@ static void mock_dmabuf_release(struct dma_buf *dma_buf) kfree(mock); } -static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct mock_dmabuf *mock = to_mock(dma_buf); void *vaddr; @@ -69,12 +69,12 @@ static int 
mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) vaddr = vm_map_ram(mock->pages, mock->npages, 0); if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } -static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct mock_dmabuf *mock = to_mock(dma_buf); diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index f9a9198ef198..d0c2b1422b3b 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -2,6 +2,7 @@ /* Copyright 2017-2019 Qiang Yu */ #include +#include #include #include #include @@ -182,7 +183,7 @@ static int lima_gem_pin(struct drm_gem_object *obj) return drm_gem_shmem_pin(&bo->base); } -static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) { struct lima_bo *bo = to_lima_bo(obj); diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 5612d73f238f..390c969f74ad 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2017-2019 Qiang Yu */ -#include +#include #include #include #include @@ -284,7 +284,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) struct lima_dump_chunk_buffer *buffer_chunk; u32 size, task_size, mem_size; int i; - struct dma_buf_map map; + struct iosys_map map; int ret; mutex_lock(&dev->error_task_list_lock); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index d0544962cfc1..139d7724c6d0 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -220,7 +220,7 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, return &mtk_gem->base; } -int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); struct sg_table *sgt = NULL; @@ -247,12 +247,13 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) out: kfree(sgt); - dma_buf_map_set_vaddr(map, mtk_gem->kvaddr); + iosys_map_set_vaddr(map, mtk_gem->kvaddr); return 0; } -void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); void *vaddr = map->vaddr; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index 9a359a06cb73..78f23b07a02e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h @@ -42,7 +42,8 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); -int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void mtk_drm_gem_prime_vunmap(struct drm_gem_object 
*obj, + struct iosys_map *map); #endif diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index b983541a4c53..9c817b33c398 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -9,7 +9,7 @@ */ #include -#include +#include #include #include @@ -845,7 +845,7 @@ mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe, static void mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb, - struct drm_rect *clip, const struct dma_buf_map *map) + struct drm_rect *clip, const struct iosys_map *map) { void __iomem *dst = mdev->vram; void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index d7574e6bd4e4..ae52412d529a 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -309,8 +309,8 @@ void msm_gem_shrinker_init(struct drm_device *dev); void msm_gem_shrinker_cleanup(struct drm_device *dev); struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); -int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int msm_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index fc94e061d6a7..e8f1b7a2ca9c 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -22,19 +22,19 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages); } -int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { void *vaddr; vaddr = msm_gem_get_vaddr(obj); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } -void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { msm_gem_put_vaddr(obj); } diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 1d36df5af98d..bc0df93f7f21 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -1,16 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright 2019 Collabora Ltd */ -#include -#include -#include #include -#include #include +#include #include #include #include +#include +#include +#include + #include "panfrost_device.h" #include "panfrost_features.h" #include "panfrost_gem.h" @@ -73,7 +74,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, { struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; - struct dma_buf_map map; + struct iosys_map map; struct drm_gem_shmem_object *bo; u32 cfg, as; int ret; @@ -181,7 +182,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, { struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; - 
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(perfcnt->buf); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(perfcnt->buf); if (user != perfcnt->user) return -EINVAL; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 9e0a1e836011..9a9c29b1d3e1 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include @@ -566,8 +566,8 @@ static struct qxl_bo *qxl_create_cursor(struct qxl_device *qdev, { static const u32 size = 64 * 64 * 4; struct qxl_bo *cursor_bo; - struct dma_buf_map cursor_map; - struct dma_buf_map user_map; + struct iosys_map cursor_map; + struct iosys_map user_map; struct qxl_cursor cursor; int ret; @@ -1183,7 +1183,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev) { int ret; struct drm_gem_object *gobj; - struct dma_buf_map map; + struct iosys_map map; int monitors_config_size = sizeof(struct qxl_monitors_config) + qxl_num_crtc * sizeof(struct qxl_head); diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 7d27891e87fa..a93de9e1977a 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -20,7 +20,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include +#include #include @@ -44,7 +44,7 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, unsigned int num_clips, struct qxl_bo *clips_bo) { - struct dma_buf_map map; + struct iosys_map map; struct qxl_clip_rects *dev_clips; int ret; @@ -146,7 +146,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, int stride = fb->pitches[0]; /* depth is not actually interesting, we don't mask with it */ int depth = fb->format->cpp[0] * 8; - struct dma_buf_map surface_map; + struct iosys_map surface_map; uint8_t *surface_base; struct qxl_release *release; struct qxl_bo *clips_bo; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 359266d9e860..9796099ff18f 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -30,7 +30,7 @@ * Definitions taken from spice-protocol, plus kernel driver specific bits. 
*/ -#include +#include #include #include #include @@ -50,7 +50,7 @@ #include "qxl_dev.h" -struct dma_buf_map; +struct iosys_map; #define DRIVER_AUTHOR "Dave Airlie" @@ -81,7 +81,7 @@ struct qxl_bo { /* Protected by tbo.reserved */ struct ttm_place placements[3]; struct ttm_placement placement; - struct dma_buf_map map; + struct iosys_map map; void *kptr; unsigned int map_count; int type; @@ -431,9 +431,9 @@ struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *qxl_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); void qxl_gem_prime_vunmap(struct drm_gem_object *obj, - struct dma_buf_map *map); + struct iosys_map *map); /* qxl_irq.c */ int qxl_irq_init(struct qxl_device *qdev); diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index fbb36e3e8564..b42a657e4c2f 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -23,7 +23,7 @@ * Alon Levy */ -#include +#include #include #include "qxl_drv.h" @@ -158,7 +158,7 @@ int qxl_bo_create(struct qxl_device *qdev, unsigned long size, return 0; } -int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map) +int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map) { int r; @@ -184,7 +184,7 @@ out: return 0; } -int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map) +int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map) { int r; @@ -210,7 +210,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, void *rptr; int ret; struct io_mapping *map; - struct dma_buf_map bo_map; + struct iosys_map bo_map; if (bo->tbo.resource->mem_type == TTM_PL_VRAM) map = qdev->vram_mapping; diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index cee4b52b75dd..53392cb90eec 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -59,8 +59,8 @@ extern int qxl_bo_create(struct qxl_device *qdev, u32 priority, struct qxl_surface *surf, struct qxl_bo **bo_ptr); -int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map); -int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map); +int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map); +int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map); int qxl_bo_vunmap(struct qxl_bo *bo); void qxl_bo_vunmap_locked(struct qxl_bo *bo); void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset); diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index 4a10cb0a413b..142d01415acb 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -54,7 +54,7 @@ struct drm_gem_object *qxl_gem_prime_import_sg_table( return ERR_PTR(-ENOSYS); } -int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { struct qxl_bo *bo = gem_to_qxl_bo(obj); int ret; @@ -67,7 +67,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) } void qxl_gem_prime_vunmap(struct drm_gem_object *obj, - struct dma_buf_map *map) + struct iosys_map *map) { struct qxl_bo *bo = gem_to_qxl_bo(obj); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index a36a4f2c76b0..f563284a7fac 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c 
+++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -26,6 +26,7 @@ * Jerome Glisse */ +#include #include #include diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 63eb73b624aa..985584147da1 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -510,7 +510,7 @@ err_free_rk_obj: return ERR_PTR(ret); } -int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); @@ -519,18 +519,19 @@ int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) pgprot_writecombine(PAGE_KERNEL)); if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) return -ENOMEM; - dma_buf_map_set_vaddr(map, rk_obj->kvaddr); + iosys_map_set_vaddr(map, rk_obj->kvaddr); return 0; } -void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 47c1861eece0..72f59ac6d258 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -31,8 +31,9 @@ struct drm_gem_object * rockchip_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); -int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, + struct iosys_map *map); struct rockchip_gem_object * rockchip_gem_create_object(struct drm_device *drm, unsigned int size, diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index fce0e52973c2..0063403ab5e1 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -174,7 +174,7 @@ static void tegra_bo_unpin(struct host1x_bo_mapping *map) static void *tegra_bo_mmap(struct host1x_bo *bo) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); - struct dma_buf_map map; + struct iosys_map map; int ret; if (obj->vaddr) { @@ -191,7 +191,7 @@ static void *tegra_bo_mmap(struct host1x_bo *bo) static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr); if (obj->vaddr) return; @@ -699,17 +699,17 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) return __tegra_gem_mmap(gem, vma); } -static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map) +static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map) { struct drm_gem_object *gem = buf->priv; struct tegra_bo *bo = to_tegra_bo(gem); - dma_buf_map_set_vaddr(map, bo->vaddr); + iosys_map_set_vaddr(map, bo->vaddr); return 0; } -static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map) +static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map) { } diff --git a/drivers/gpu/drm/tiny/cirrus.c 
b/drivers/gpu/drm/tiny/cirrus.c index c95d9ff7d600..2dc5ffecf191 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -16,7 +16,7 @@ * Copyright 1999-2001 Jeff Garzik */ -#include +#include #include #include @@ -312,7 +312,8 @@ static int cirrus_mode_set(struct cirrus_device *cirrus, return 0; } -static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_map *map, +static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, + const struct iosys_map *map, struct drm_rect *rect) { struct cirrus_device *cirrus = to_cirrus(fb->dev); @@ -344,7 +345,8 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_ return 0; } -static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map) +static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb, + const struct iosys_map *map) { struct drm_rect fullscreen = { .x1 = 0, diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 6bc0c298739c..648e585d40a8 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -95,7 +95,7 @@ struct gm12u320_device { struct drm_rect rect; int frame; int draw_status_timeout; - struct dma_buf_map src_map; + struct iosys_map src_map; } fb_update; }; @@ -395,7 +395,8 @@ err: GM12U320_ERR("Frame update error: %d\n", ret); } -static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, const struct dma_buf_map *map, +static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, + const struct iosys_map *map, struct drm_rect *dirty) { struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev); @@ -438,7 +439,7 @@ static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320) mutex_lock(&gm12u320->fb_update.lock); old_fb = gm12u320->fb_update.fb; gm12u320->fb_update.fb = NULL; - dma_buf_map_clear(&gm12u320->fb_update.src_map); + iosys_map_clear(&gm12u320->fb_update.src_map); mutex_unlock(&gm12u320->fb_update.lock); drm_framebuffer_put(old_fb); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 72a94301bc95..4bf72470abef 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include @@ -93,7 +93,7 @@ void ttm_move_memcpy(bool clear, { const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops; const struct ttm_kmap_iter_ops *src_ops = src_iter->ops; - struct dma_buf_map src_map, dst_map; + struct iosys_map src_map, dst_map; pgoff_t i; /* Single TTM move. 
NOP */ @@ -385,7 +385,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) } EXPORT_SYMBOL(ttm_bo_kunmap); -int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map) { struct ttm_resource *mem = bo->resource; int ret; @@ -413,7 +413,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) if (!vaddr_iomem) return -ENOMEM; - dma_buf_map_set_vaddr_iomem(map, vaddr_iomem); + iosys_map_set_vaddr_iomem(map, vaddr_iomem); } else { struct ttm_operation_ctx ctx = { @@ -437,25 +437,25 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); } return 0; } EXPORT_SYMBOL(ttm_bo_vmap); -void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map) { struct ttm_resource *mem = bo->resource; - if (dma_buf_map_is_null(map)) + if (iosys_map_is_null(map)) return; if (!map->is_iomem) vunmap(map->vaddr); else if (!mem->bus.addr) iounmap(map->vaddr_iomem); - dma_buf_map_clear(map); + iosys_map_clear(map); ttm_mem_io_free(bo->bdev, bo->resource); } diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 035d71332d18..df9efafa0f2f 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -22,7 +22,7 @@ * Authors: Christian König */ -#include +#include #include #include @@ -209,7 +209,7 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man, EXPORT_SYMBOL(ttm_resource_manager_debug); static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *dmap, + struct iosys_map *dmap, pgoff_t i) { struct ttm_kmap_iter_iomap *iter_io = @@ -236,11 +236,11 @@ retry: addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs + (((resource_size_t)i - iter_io->cache.i) << PAGE_SHIFT)); - dma_buf_map_set_vaddr_iomem(dmap, addr); + iosys_map_set_vaddr_iomem(dmap, addr); } static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *map) + struct iosys_map *map) { io_mapping_unmap_local(map->vaddr_iomem); } @@ -291,14 +291,14 @@ EXPORT_SYMBOL(ttm_kmap_iter_iomap_init); */ static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *dmap, + struct iosys_map *dmap, pgoff_t i) { struct ttm_kmap_iter_linear_io *iter_io = container_of(iter, typeof(*iter_io), base); *dmap = iter_io->dmap; - dma_buf_map_incr(dmap, i * PAGE_SIZE); + iosys_map_incr(dmap, i * PAGE_SIZE); } static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = { @@ -334,7 +334,7 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, } if (mem->bus.addr) { - dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr); + iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr); iter_io->needs_unmap = false; } else { size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; @@ -342,23 +342,23 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, iter_io->needs_unmap = true; memset(&iter_io->dmap, 0, sizeof(iter_io->dmap)); if (mem->bus.caching == ttm_write_combined) - dma_buf_map_set_vaddr_iomem(&iter_io->dmap, - ioremap_wc(mem->bus.offset, - bus_size)); + iosys_map_set_vaddr_iomem(&iter_io->dmap, + ioremap_wc(mem->bus.offset, + bus_size)); else if (mem->bus.caching == ttm_cached) - dma_buf_map_set_vaddr(&iter_io->dmap, - memremap(mem->bus.offset, bus_size, - 
MEMREMAP_WB | - MEMREMAP_WT | - MEMREMAP_WC)); + iosys_map_set_vaddr(&iter_io->dmap, + memremap(mem->bus.offset, bus_size, + MEMREMAP_WB | + MEMREMAP_WT | + MEMREMAP_WC)); /* If uncached requested or if mapping cached or wc failed */ - if (dma_buf_map_is_null(&iter_io->dmap)) - dma_buf_map_set_vaddr_iomem(&iter_io->dmap, - ioremap(mem->bus.offset, - bus_size)); + if (iosys_map_is_null(&iter_io->dmap)) + iosys_map_set_vaddr_iomem(&iter_io->dmap, + ioremap(mem->bus.offset, + bus_size)); - if (dma_buf_map_is_null(&iter_io->dmap)) { + if (iosys_map_is_null(&iter_io->dmap)) { ret = -ENOMEM; goto out_io_free; } @@ -387,7 +387,7 @@ ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io, struct ttm_device *bdev, struct ttm_resource *mem) { - if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) { + if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) { if (iter_io->dmap.is_iomem) iounmap(iter_io->dmap.vaddr_iomem); else diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 79c870a3bef8..d234aab800a0 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -406,18 +406,18 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages) } static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *dmap, + struct iosys_map *dmap, pgoff_t i) { struct ttm_kmap_iter_tt *iter_tt = container_of(iter, typeof(*iter_tt), base); - dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i], - iter_tt->prot)); + iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i], + iter_tt->prot)); } static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *map) + struct iosys_map *map) { kunmap_local(map->vaddr); } diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 32232228dae9..e67c40a48fb4 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -264,7 +264,8 @@ static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y, return 0; } -static int udl_handle_damage(struct drm_framebuffer *fb, const struct dma_buf_map *map, +static int udl_handle_damage(struct drm_framebuffer *fb, + const struct iosys_map *map, int x, int y, int width, int height) { struct drm_device *dev = fb->dev; diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index 4227a915b06a..4017b0a621fc 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -10,7 +10,7 @@ * Hans de Goede */ -#include +#include #include #include @@ -398,7 +398,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, u32 height = new_state->crtc_h; struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state); - struct dma_buf_map map = shadow_plane_state->data[0]; + struct iosys_map map = shadow_plane_state->data[0]; u8 *src = map.vaddr; /* TODO: Use mapping abstraction properly */ size_t data_size, mask_size; u32 flags; diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index 9e8204be9a14..c6a1036bf2ea 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -157,7 +157,7 @@ static void compose_plane(struct vkms_composer *primary_composer, void *vaddr; void (*pixel_blend)(const u8 *p_src, u8 *p_dst); - if (WARN_ON(dma_buf_map_is_null(&primary_composer->map[0]))) + if (WARN_ON(iosys_map_is_null(&primary_composer->map[0]))) 
return; vaddr = plane_composer->map[0].vaddr; @@ -187,7 +187,7 @@ static int compose_active_planes(void **vaddr_out, } } - if (WARN_ON(dma_buf_map_is_null(&primary_composer->map[0]))) + if (WARN_ON(iosys_map_is_null(&primary_composer->map[0]))) return -EINVAL; vaddr = primary_composer->map[0].vaddr; diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index d48c23d40ce5..18d944c57883 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -21,14 +21,14 @@ #define YRES_MAX 8192 struct vkms_writeback_job { - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; }; struct vkms_composer { struct drm_framebuffer fb; struct drm_rect src, dst; - struct dma_buf_map map[4]; + struct iosys_map map[4]; unsigned int offset; unsigned int pitch; unsigned int cpp; diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 32409e15244b..d8eb674b49a6 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ -#include +#include #include #include diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c index 8694227f555f..af1604dfbbaf 100644 --- a/drivers/gpu/drm/vkms/vkms_writeback.c +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ -#include +#include #include #include diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index dd358ba2bf8e..5a5bf4e5b717 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -280,7 +280,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, return &xen_obj->base; } -int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, struct dma_buf_map *map) +int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, + struct iosys_map *map) { struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj); void *vaddr; @@ -293,13 +294,13 @@ int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, struct dma_buf_ VM_MAP, PAGE_KERNEL); if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj, - struct dma_buf_map *map) + struct iosys_map *map) { vunmap(map->vaddr); } diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h index eaea470f7001..a718a1f382a3 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.h +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h @@ -12,7 +12,7 @@ #define __XEN_DRM_FRONT_GEM_H struct dma_buf_attachment; -struct dma_buf_map; +struct iosys_map; struct drm_device; struct drm_gem_object; struct sg_table; @@ -32,9 +32,9 @@ struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj); void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj); int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, - struct dma_buf_map *map); + struct iosys_map *map); void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj, - struct dma_buf_map *map); + struct iosys_map *map); #endif /* __XEN_DRM_FRONT_GEM_H */ diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c index 7c4096e62173..ecf065cd4a67 100644 --- 
a/drivers/media/common/videobuf2/videobuf2-dma-contig.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -99,7 +99,7 @@ static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv) return buf->vaddr; if (buf->db_attach) { - struct dma_buf_map map; + struct iosys_map map; if (!dma_buf_vmap(buf->db_attach->dmabuf, &map)) buf->vaddr = map.vaddr; @@ -446,7 +446,7 @@ vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf, return 0; } -static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) +static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map) { struct vb2_dc_buf *buf; void *vaddr; @@ -456,7 +456,7 @@ static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) if (!vaddr) return -EINVAL; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } @@ -737,7 +737,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv) { struct vb2_dc_buf *buf = mem_priv; struct sg_table *sgt = buf->dma_sgt; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); if (WARN_ON(!buf->db_attach)) { pr_err("trying to unpin a not attached buffer\n"); diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c index 90acafd9a290..f8a21c560ad2 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c @@ -303,7 +303,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv) static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv) { struct vb2_dma_sg_buf *buf = buf_priv; - struct dma_buf_map map; + struct iosys_map map; int ret; BUG_ON(!buf); @@ -492,11 +492,12 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf, return 0; } -static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) +static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, + struct iosys_map *map) { struct vb2_dma_sg_buf *buf = dbuf->priv; - dma_buf_map_set_vaddr(map, buf->vaddr); + iosys_map_set_vaddr(map, buf->vaddr); return 0; } @@ -581,7 +582,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv) { struct vb2_dma_sg_buf *buf = mem_priv; struct sg_table *sgt = buf->dma_sgt; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); if (WARN_ON(!buf->db_attach)) { pr_err("trying to unpin a not attached buffer\n"); diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c index 0bbfea66554f..948152f1596b 100644 --- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c +++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c @@ -312,11 +312,12 @@ static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf) vb2_vmalloc_put(dbuf->priv); } -static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) +static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, + struct iosys_map *map) { struct vb2_vmalloc_buf *buf = dbuf->priv; - dma_buf_map_set_vaddr(map, buf->vaddr); + iosys_map_set_vaddr(map, buf->vaddr); return 0; } @@ -372,7 +373,7 @@ static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb, static int vb2_vmalloc_map_dmabuf(void *mem_priv) { struct vb2_vmalloc_buf *buf = mem_priv; - struct dma_buf_map map; + struct iosys_map map; int ret; ret = dma_buf_vmap(buf->dbuf, &map); @@ -386,7 +387,7 @@ static int vb2_vmalloc_map_dmabuf(void *mem_priv) static void 
vb2_vmalloc_unmap_dmabuf(void *mem_priv) { struct vb2_vmalloc_buf *buf = mem_priv; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); dma_buf_vunmap(buf->dbuf, &map); buf->vaddr = NULL; @@ -395,7 +396,7 @@ static void vb2_vmalloc_unmap_dmabuf(void *mem_priv) static void vb2_vmalloc_detach_dmabuf(void *mem_priv) { struct vb2_vmalloc_buf *buf = mem_priv; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); if (buf->vaddr) dma_buf_vunmap(buf->dbuf, &map); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 4ccbf43e6bfa..5c0503655212 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -587,11 +587,11 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf, kfree(a); } -static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct fastrpc_buf *buf = dmabuf->priv; - dma_buf_map_set_vaddr(map, buf->virt); + iosys_map_set_vaddr(map, buf->virt); return 0; } diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index cc9de1632dd3..22deb216b59c 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -35,7 +35,7 @@ #include -struct dma_buf_map; +struct iosys_map; void drm_clflush_pages(struct page *pages[], unsigned long num_pages); void drm_clflush_sg(struct sg_table *st); @@ -74,7 +74,7 @@ static inline bool drm_arch_can_wc_memory(void) void drm_memcpy_init_early(void); -void drm_memcpy_from_wc(struct dma_buf_map *dst, - const struct dma_buf_map *src, +void drm_memcpy_from_wc(struct iosys_map *dst, + const struct iosys_map *src, unsigned long len); #endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index f07f2fb02e75..4fc8018eddda 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -3,7 +3,7 @@ #ifndef _DRM_CLIENT_H_ #define _DRM_CLIENT_H_ -#include +#include #include #include #include @@ -144,7 +144,7 @@ struct drm_client_buffer { /** * @map: Virtual address for the buffer */ - struct dma_buf_map map; + struct iosys_map map; /** * @fb: DRM framebuffer @@ -156,7 +156,8 @@ struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); -int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map); +int drm_client_buffer_vmap(struct drm_client_buffer *buffer, + struct iosys_map *map); void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); int drm_client_modeset_create(struct drm_client_dev *client); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 35e7f44c2a75..e2941cee14b6 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -39,7 +39,7 @@ #include -struct dma_buf_map; +struct iosys_map; struct drm_gem_object; /** @@ -139,7 +139,7 @@ struct drm_gem_object_funcs { * * This callback is optional. */ - int (*vmap)(struct drm_gem_object *obj, struct dma_buf_map *map); + int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map); /** * @vunmap: @@ -149,7 +149,7 @@ struct drm_gem_object_funcs { * * This callback is optional. 
*/ - void (*vunmap)(struct drm_gem_object *obj, struct dma_buf_map *map); + void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map); /** * @mmap: diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h index 0b1e2dd2ac3f..6e3319e9001a 100644 --- a/include/drm/drm_gem_atomic_helper.h +++ b/include/drm/drm_gem_atomic_helper.h @@ -3,7 +3,7 @@ #ifndef __DRM_GEM_ATOMIC_HELPER_H__ #define __DRM_GEM_ATOMIC_HELPER_H__ -#include +#include #include #include @@ -59,7 +59,7 @@ struct drm_shadow_plane_state { * The memory mappings stored in map should be established in the plane's * prepare_fb callback and removed in the cleanup_fb callback. */ - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; /** * @data: Address of each framebuffer BO's data @@ -67,7 +67,7 @@ struct drm_shadow_plane_state { * The address of the data stored in each mapping. This is different * for framebuffers with non-zero offset fields. */ - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; }; /** diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index adb507a9dbf0..fbda4ce5d5fb 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -38,7 +38,8 @@ void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj); void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj, struct drm_printer *p, unsigned int indent); struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj); -int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map); +int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, + struct iosys_map *map); int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma); extern const struct vm_operations_struct drm_gem_cma_vm_ops; @@ -106,7 +107,8 @@ static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_ob * Returns: * 0 on success or a negative error code on failure. 
*/ -static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h index 905727719ead..1091e4fa08cb 100644 --- a/include/drm/drm_gem_framebuffer_helper.h +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -2,7 +2,7 @@ #define __DRM_GEM_FB_HELPER_H__ #include -#include +#include #include @@ -40,10 +40,10 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); int drm_gem_fb_vmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES], - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]); + struct iosys_map map[static DRM_FORMAT_MAX_PLANES], + struct iosys_map data[DRM_FORMAT_MAX_PLANES]); void drm_gem_fb_vunmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES]); + struct iosys_map map[static DRM_FORMAT_MAX_PLANES]); int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 311d66c9cf4b..68347b63fc71 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -113,8 +113,10 @@ int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map); -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map); +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); @@ -226,7 +228,8 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_ * Returns: * 0 on success or a negative error code on failure. */ -static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); @@ -241,7 +244,8 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, struct d * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should * use it as their &drm_gem_object_funcs.vunmap handler. 
*/ -static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h index 78040f6cc6f3..4c003b4f173e 100644 --- a/include/drm/drm_gem_ttm_helper.h +++ b/include/drm/drm_gem_ttm_helper.h @@ -10,7 +10,7 @@ #include #include -struct dma_buf_map; +struct iosys_map; #define drm_gem_ttm_of_gem(gem_obj) \ container_of(gem_obj, struct ttm_buffer_object, base) @@ -18,9 +18,9 @@ struct dma_buf_map; void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, const struct drm_gem_object *gem); int drm_gem_ttm_vmap(struct drm_gem_object *gem, - struct dma_buf_map *map); + struct iosys_map *map); void drm_gem_ttm_vunmap(struct drm_gem_object *gem, - struct dma_buf_map *map); + struct iosys_map *map); int drm_gem_ttm_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma); diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index b4ce27a72773..c083a1d71cf4 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -12,7 +12,7 @@ #include #include -#include +#include struct drm_mode_create_dumb; struct drm_plane; @@ -51,7 +51,7 @@ struct vm_area_struct; */ struct drm_gem_vram_object { struct ttm_buffer_object bo; - struct dma_buf_map map; + struct iosys_map map; /** * @vmap_use_count: @@ -97,8 +97,9 @@ void drm_gem_vram_put(struct drm_gem_vram_object *gbo); s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); -int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); -void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map); +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, + struct iosys_map *map); int drm_gem_vram_fill_create_dumb(struct drm_file *file, struct drm_device *dev, diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index 54f2c58305d2..2a1d01e5b56b 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -54,7 +54,7 @@ struct device; struct dma_buf_export_info; struct dma_buf; struct dma_buf_attachment; -struct dma_buf_map; +struct iosys_map; enum dma_data_direction; @@ -83,8 +83,8 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir); -int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map); -void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map); +int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map); +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map); int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index c17b2df9178b..155b19ee12fb 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -47,7 +47,7 @@ struct ttm_global; struct ttm_device; -struct dma_buf_map; +struct iosys_map; struct drm_mm_node; @@ -481,17 +481,17 @@ 
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); * ttm_bo_vmap * * @bo: The buffer object. - * @map: pointer to a struct dma_buf_map representing the map. + * @map: pointer to a struct iosys_map representing the map. * * Sets up a kernel virtual mapping, using ioremap or vmap to the * data in the buffer object. The parameter @map returns the virtual - * address as struct dma_buf_map. Unmap the buffer with ttm_bo_vunmap(). + * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap(). * * Returns * -ENOMEM: Out of memory. * -EINVAL: Invalid range. */ -int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map); /** * ttm_bo_vunmap @@ -501,7 +501,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); * * Unmaps a kernel map set up by ttm_bo_vmap(). */ -void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map); /** * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object. diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h index 8bb00fd39d6c..cc5c09a211b4 100644 --- a/include/drm/ttm/ttm_kmap_iter.h +++ b/include/drm/ttm/ttm_kmap_iter.h @@ -8,7 +8,7 @@ #include struct ttm_kmap_iter; -struct dma_buf_map; +struct iosys_map; /** * struct ttm_kmap_iter_ops - Ops structure for a struct @@ -24,22 +24,22 @@ struct ttm_kmap_iter_ops { * kmap_local semantics. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. - * @dmap: The struct dma_buf_map holding the virtual address after + * @dmap: The struct iosys_map holding the virtual address after * the operation. * @i: The location within the resource to map. PAGE_SIZE granularity. */ void (*map_local)(struct ttm_kmap_iter *res_iter, - struct dma_buf_map *dmap, pgoff_t i); + struct iosys_map *dmap, pgoff_t i); /** * unmap_local() - Unmap a PAGE_SIZE part of the resource previously * mapped using kmap_local. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. - * @dmap: The struct dma_buf_map holding the virtual address after + * @dmap: The struct iosys_map holding the virtual address after * the operation. */ void (*unmap_local)(struct ttm_kmap_iter *res_iter, - struct dma_buf_map *dmap); + struct iosys_map *dmap); bool maps_tt; }; diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 5952051091cd..6eae09e382d2 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -27,7 +27,7 @@ #include #include -#include +#include #include #include #include @@ -41,7 +41,7 @@ struct ttm_resource; struct ttm_place; struct ttm_buffer_object; struct ttm_placement; -struct dma_buf_map; +struct iosys_map; struct io_mapping; struct sg_table; struct scatterlist; @@ -207,7 +207,7 @@ struct ttm_kmap_iter_iomap { */ struct ttm_kmap_iter_linear_io { struct ttm_kmap_iter base; - struct dma_buf_map dmap; + struct iosys_map dmap; bool needs_unmap; }; diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h deleted file mode 100644 index 278d489e4bdd..000000000000 --- a/include/linux/dma-buf-map.h +++ /dev/null @@ -1,266 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Pointer to dma-buf-mapped memory, plus helpers. - */ - -#ifndef __DMA_BUF_MAP_H__ -#define __DMA_BUF_MAP_H__ - -#include -#include - -/** - * DOC: overview - * - * Calling dma-buf's vmap operation returns a pointer to the buffer's memory. 
- * Depending on the location of the buffer, users may have to access it with - * I/O operations or memory load/store operations. For example, copying to - * system memory could be done with memcpy(), copying to I/O memory would be - * done with memcpy_toio(). - * - * .. code-block:: c - * - * void *vaddr = ...; // pointer to system memory - * memcpy(vaddr, src, len); - * - * void *vaddr_iomem = ...; // pointer to I/O memory - * memcpy_toio(vaddr_iomem, src, len); - * - * When using dma-buf's vmap operation, the returned pointer is encoded as - * :c:type:`struct dma_buf_map `. - * :c:type:`struct dma_buf_map ` stores the buffer's address in - * system or I/O memory and a flag that signals the required method of - * accessing the buffer. Use the returned instance and the helper functions - * to access the buffer's memory in the correct way. - * - * The type :c:type:`struct dma_buf_map ` and its helpers are - * actually independent from the dma-buf infrastructure. When sharing buffers - * among devices, drivers have to know the location of the memory to access - * the buffers in a safe way. :c:type:`struct dma_buf_map ` - * solves this problem for dma-buf and its users. If other drivers or - * sub-systems require similar functionality, the type could be generalized - * and moved to a more prominent header file. - * - * Open-coding access to :c:type:`struct dma_buf_map ` is - * considered bad style. Rather than accessing its fields directly, use one - * of the provided helper functions, or implement your own. For example, - * instances of :c:type:`struct dma_buf_map ` can be initialized - * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with - * dma_buf_map_set_vaddr(). These helpers will set an address in system memory. - * - * .. code-block:: c - * - * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf); - * - * dma_buf_map_set_vaddr(&map, 0xdeadbeaf); - * - * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem(). - * - * .. code-block:: c - * - * dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf); - * - * Instances of struct dma_buf_map do not have to be cleaned up, but - * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings - * always refer to system memory. - * - * .. code-block:: c - * - * dma_buf_map_clear(&map); - * - * Test if a mapping is valid with either dma_buf_map_is_set() or - * dma_buf_map_is_null(). - * - * .. code-block:: c - * - * if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map)) - * // always true - * - * Instances of :c:type:`struct dma_buf_map ` can be compared - * for equality with dma_buf_map_is_equal(). Mappings that point to different - * memory spaces, system or I/O, are never equal. That's even true if both - * spaces are located in the same address space, both mappings contain the - * same address value, or both mappings refer to NULL. - * - * .. code-block:: c - * - * struct dma_buf_map sys_map; // refers to system memory - * struct dma_buf_map io_map; // refers to I/O memory - * - * if (dma_buf_map_is_equal(&sys_map, &io_map)) - * // always false - * - * A set up instance of struct dma_buf_map can be used to access or manipulate - * the buffer memory. Depending on the location of the memory, the provided - * helpers will pick the correct operations. Data can be copied into the memory - * with dma_buf_map_memcpy_to(). The address can be manipulated with - * dma_buf_map_incr(). - * - * ..
code-block:: c - * - * const void *src = ...; // source buffer - * size_t len = ...; // length of src - * - * dma_buf_map_memcpy_to(&map, src, len); - * dma_buf_map_incr(&map, len); // go to first byte after the memcpy - */ - -/** - * struct dma_buf_map - Pointer to vmap'ed dma-buf memory. - * @vaddr_iomem: The buffer's address if in I/O memory - * @vaddr: The buffer's address if in system memory - * @is_iomem: True if the dma-buf memory is located in I/O - * memory, or false otherwise. - */ -struct dma_buf_map { - union { - void __iomem *vaddr_iomem; - void *vaddr; - }; - bool is_iomem; -}; - -/** - * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory - * @vaddr_: A system-memory address - */ -#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \ - { \ - .vaddr = (vaddr_), \ - .is_iomem = false, \ - } - -/** - * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory - * @map: The dma-buf mapping structure - * @vaddr: A system-memory address - * - * Sets the address and clears the I/O-memory flag. - */ -static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr) -{ - map->vaddr = vaddr; - map->is_iomem = false; -} - -/** - * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory - * @map: The dma-buf mapping structure - * @vaddr_iomem: An I/O-memory address - * - * Sets the address and the I/O-memory flag. - */ -static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map, - void __iomem *vaddr_iomem) -{ - map->vaddr_iomem = vaddr_iomem; - map->is_iomem = true; -} - -/** - * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality - * @lhs: The dma-buf mapping structure - * @rhs: A dma-buf mapping structure to compare with - * - * Two dma-buf mapping structures are equal if they both refer to the same type of memory - * and to the same address within that memory. - * - * Returns: - * True if both structures are equal, or false otherwise. - */ -static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs, - const struct dma_buf_map *rhs) -{ - if (lhs->is_iomem != rhs->is_iomem) - return false; - else if (lhs->is_iomem) - return lhs->vaddr_iomem == rhs->vaddr_iomem; - else - return lhs->vaddr == rhs->vaddr; -} - -/** - * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL - * @map: The dma-buf mapping structure - * - * Depending on the state of struct dma_buf_map.is_iomem, tests if the - * mapping is NULL. - * - * Returns: - * True if the mapping is NULL, or false otherwise. - */ -static inline bool dma_buf_map_is_null(const struct dma_buf_map *map) -{ - if (map->is_iomem) - return !map->vaddr_iomem; - return !map->vaddr; -} - -/** - * dma_buf_map_is_set - Tests if the dma-buf mapping has been set - * @map: The dma-buf mapping structure - * - * Depending on the state of struct dma_buf_map.is_iomem, tests if the - * mapping has been set. - * - * Returns: - * True if the mapping has been set, or false otherwise. - */ -static inline bool dma_buf_map_is_set(const struct dma_buf_map *map) -{ - return !dma_buf_map_is_null(map); -} - -/** - * dma_buf_map_clear - Clears a dma-buf mapping structure - * @map: The dma-buf mapping structure - * - * Clears all fields to zero, including struct dma_buf_map.is_iomem, so - * mapping structures that were set to point to I/O memory are reset for - * system memory. Pointers are cleared to NULL. This is the default.
- */ -static inline void dma_buf_map_clear(struct dma_buf_map *map) -{ - if (map->is_iomem) { - map->vaddr_iomem = NULL; - map->is_iomem = false; - } else { - map->vaddr = NULL; - } -} - -/** - * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping - * @dst: The dma-buf mapping structure - * @src: The source buffer - * @len: The number of bytes in src - * - * Copies data into a dma-buf mapping. The source buffer is in system - * memory. Depending on the buffer's location, the helper picks the correct - * method of accessing the memory. - */ -static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len) -{ - if (dst->is_iomem) - memcpy_toio(dst->vaddr_iomem, src, len); - else - memcpy(dst->vaddr, src, len); -} - -/** - * dma_buf_map_incr - Increments the address stored in a dma-buf mapping - * @map: The dma-buf mapping structure - * @incr: The number of bytes to increment - * - * Increments the address stored in a dma-buf mapping. Depending on the - * buffer's location, the correct value will be updated. - */ -static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr) -{ - if (map->is_iomem) - map->vaddr_iomem += incr; - else - map->vaddr += incr; -} - -#endif /* __DMA_BUF_MAP_H__ */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 7ab50076e7a6..2097760e8e95 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -13,7 +13,7 @@ #ifndef __DMA_BUF_H__ #define __DMA_BUF_H__ -#include +#include #include #include #include @@ -283,8 +283,8 @@ struct dma_buf_ops { */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); - int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); - void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); + int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map); + void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map); }; /** @@ -347,7 +347,7 @@ struct dma_buf { * @vmap_ptr: * The current vmap ptr if @vmapping_counter > 0. Protected by @lock. */ - struct dma_buf_map vmap_ptr; + struct iosys_map vmap_ptr; /** * @exp_name: @@ -628,6 +628,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf, int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); -int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map); -void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map); +int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map); +void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map); #endif /* __DMA_BUF_H__ */ diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h new file mode 100644 index 000000000000..f4186f91caa6 --- /dev/null +++ b/include/linux/iosys-map.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Pointer abstraction for IO/system memory + */ + +#ifndef __IOSYS_MAP_H__ +#define __IOSYS_MAP_H__ + +#include +#include + +/** + * DOC: overview + * + * When accessing a memory region, depending on its location, users may have to + * access it with I/O operations or memory load/store operations. For example, + * copying to system memory could be done with memcpy(), copying to I/O memory + * would be done with memcpy_toio(). + * + * ..
code-block:: c + * + * void *vaddr = ...; // pointer to system memory + * memcpy(vaddr, src, len); + * + * void *vaddr_iomem = ...; // pointer to I/O memory + * memcpy_toio(vaddr_iomem, src, len); + * + * The user of such a pointer may not have information about the mapping of that + * region or may want to have a single code path to handle operations on that + * buffer, regardless of whether it's located in system or IO memory. The type + * :c:type:`struct iosys_map ` and its helpers abstract that so the + * buffer can be passed around to other drivers or have separate duties inside + * the same driver for allocation, read and write operations. + * + * Open-coding access to :c:type:`struct iosys_map ` is considered + * bad style. Rather than accessing its fields directly, use one of the provided + * helper functions, or implement your own. For example, instances of + * :c:type:`struct iosys_map ` can be initialized statically with + * IOSYS_MAP_INIT_VADDR(), or at runtime with iosys_map_set_vaddr(). These + * helpers will set an address in system memory. + * + * .. code-block:: c + * + * struct iosys_map map = IOSYS_MAP_INIT_VADDR(0xdeadbeaf); + * + * iosys_map_set_vaddr(&map, 0xdeadbeaf); + * + * To set an address in I/O memory, use iosys_map_set_vaddr_iomem(). + * + * .. code-block:: c + * + * iosys_map_set_vaddr_iomem(&map, 0xdeadbeaf); + * + * Instances of struct iosys_map do not have to be cleaned up, but + * can be cleared to NULL with iosys_map_clear(). Cleared mappings + * always refer to system memory. + * + * .. code-block:: c + * + * iosys_map_clear(&map); + * + * Test if a mapping is valid with either iosys_map_is_set() or + * iosys_map_is_null(). + * + * .. code-block:: c + * + * if (iosys_map_is_set(&map) != iosys_map_is_null(&map)) + * // always true + * + * Instances of :c:type:`struct iosys_map ` can be compared for + * equality with iosys_map_is_equal(). Mappings that point to different memory + * spaces, system or I/O, are never equal. That's even true if both spaces are + * located in the same address space, both mappings contain the same address + * value, or both mappings refer to NULL. + * + * .. code-block:: c + * + * struct iosys_map sys_map; // refers to system memory + * struct iosys_map io_map; // refers to I/O memory + * + * if (iosys_map_is_equal(&sys_map, &io_map)) + * // always false + * + * A set up instance of struct iosys_map can be used to access or manipulate the + * buffer memory. Depending on the location of the memory, the provided helpers + * will pick the correct operations. Data can be copied into the memory with + * iosys_map_memcpy_to(). The address can be manipulated with iosys_map_incr(). + * + * .. code-block:: c + * + * const void *src = ...; // source buffer + * size_t len = ...; // length of src + * + * iosys_map_memcpy_to(&map, src, len); + * iosys_map_incr(&map, len); // go to first byte after the memcpy + */ + +/** + * struct iosys_map - Pointer to IO/system memory + * @vaddr_iomem: The buffer's address if in I/O memory + * @vaddr: The buffer's address if in system memory + * @is_iomem: True if the buffer is located in I/O memory, or false + * otherwise.
+ */ +struct iosys_map { + union { + void __iomem *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; +}; + +/** + * IOSYS_MAP_INIT_VADDR - Initializes struct iosys_map to an address in system memory + * @vaddr_: A system-memory address + */ +#define IOSYS_MAP_INIT_VADDR(vaddr_) \ - no...	{ \ + .vaddr = (vaddr_), \ + .is_iomem = false, \ + } + +/** + * iosys_map_set_vaddr - Sets an iosys mapping structure to an address in system memory + * @map: The iosys_map structure + * @vaddr: A system-memory address + * + * Sets the address and clears the I/O-memory flag. + */ +static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr) +{ + map->vaddr = vaddr; + map->is_iomem = false; +} + +/** + * iosys_map_set_vaddr_iomem - Sets an iosys mapping structure to an address in I/O memory + * @map: The iosys_map structure + * @vaddr_iomem: An I/O-memory address + * + * Sets the address and the I/O-memory flag. + */ +static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map, + void __iomem *vaddr_iomem) +{ + map->vaddr_iomem = vaddr_iomem; + map->is_iomem = true; +} + +/** + * iosys_map_is_equal - Compares two iosys mapping structures for equality + * @lhs: The iosys_map structure + * @rhs: An iosys_map structure to compare with + * + * Two iosys mapping structures are equal if they both refer to the same type of memory + * and to the same address within that memory. + * + * Returns: + * True if both structures are equal, or false otherwise. + */ +static inline bool iosys_map_is_equal(const struct iosys_map *lhs, + const struct iosys_map *rhs) +{ + if (lhs->is_iomem != rhs->is_iomem) + return false; + else if (lhs->is_iomem) + return lhs->vaddr_iomem == rhs->vaddr_iomem; + else + return lhs->vaddr == rhs->vaddr; +} + +/** + * iosys_map_is_null - Tests for an iosys mapping to be NULL + * @map: The iosys_map structure + * + * Depending on the state of struct iosys_map.is_iomem, tests if the + * mapping is NULL. + * + * Returns: + * True if the mapping is NULL, or false otherwise. + */ +static inline bool iosys_map_is_null(const struct iosys_map *map) +{ + if (map->is_iomem) + return !map->vaddr_iomem; + return !map->vaddr; +} + +/** + * iosys_map_is_set - Tests if the iosys mapping has been set + * @map: The iosys_map structure + * + * Depending on the state of struct iosys_map.is_iomem, tests if the + * mapping has been set. + * + * Returns: + * True if the mapping has been set, or false otherwise. + */ +static inline bool iosys_map_is_set(const struct iosys_map *map) +{ + return !iosys_map_is_null(map); +} + +/** + * iosys_map_clear - Clears an iosys mapping structure + * @map: The iosys_map structure + * + * Clears all fields to zero, including struct iosys_map.is_iomem, so + * mapping structures that were set to point to I/O memory are reset for + * system memory. Pointers are cleared to NULL. This is the default. + */ +static inline void iosys_map_clear(struct iosys_map *map) +{ + if (map->is_iomem) { + map->vaddr_iomem = NULL; + map->is_iomem = false; + } else { + map->vaddr = NULL; + } +} + +/** + * iosys_map_memcpy_to - Memcpy into iosys mapping + * @dst: The iosys_map structure + * @src: The source buffer + * @len: The number of bytes in src + * + * Copies data into an iosys mapping. The source buffer is in system + * memory. Depending on the buffer's location, the helper picks the correct + * method of accessing the memory.
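+ *
+ * A minimal usage sketch, assuming a mapping that was set up beforehand
+ * with iosys_map_set_vaddr() or iosys_map_set_vaddr_iomem():
+ *
+ * .. code-block:: c
+ *
+ *	const u8 pattern[4] = { 0xde, 0xad, 0xbe, 0xef };
+ *
+ *	iosys_map_memcpy_to(&map, pattern, sizeof(pattern));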
+ */ +static inline void iosys_map_memcpy_to(struct iosys_map *dst, const void *src, + size_t len) +{ + if (dst->is_iomem) + memcpy_toio(dst->vaddr_iomem, src, len); + else + memcpy(dst->vaddr, src, len); +} + +/** + * iosys_map_incr - Increments the address stored in an iosys mapping + * @map: The iosys_map structure + * @incr: The number of bytes to increment + * + * Increments the address stored in an iosys mapping. Depending on the + * buffer's location, the correct value will be updated. + */ +static inline void iosys_map_incr(struct iosys_map *map, size_t incr) +{ + if (map->is_iomem) + map->vaddr_iomem += incr; + else + map->vaddr += incr; +} + +#endif /* __IOSYS_MAP_H__ */ -- cgit v1.2.3-71-gd317 From 3486bedd99196ecdfe99c0ab5b67ad3c47e8a8fa Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 4 Feb 2022 10:57:35 -0800 Subject: bpf: Use bytes instead of pages for bpf_jit_[charge|uncharge]_modmem This enables sub-page memory charge and allocation. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220204185742.271030-3-song@kernel.org --- include/linux/bpf.h | 4 ++-- kernel/bpf/core.c | 17 ++++++++--------- kernel/bpf/trampoline.c | 6 +++--- 3 files changed, 13 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6eb0b180d33b..366f88afd56b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -846,8 +846,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); void bpf_image_ksym_del(struct bpf_ksym *ksym); void bpf_ksym_add(struct bpf_ksym *ksym); void bpf_ksym_del(struct bpf_ksym *ksym); -int bpf_jit_charge_modmem(u32 pages); -void bpf_jit_uncharge_modmem(u32 pages); +int bpf_jit_charge_modmem(u32 size); +void bpf_jit_uncharge_modmem(u32 size); bool bpf_prog_has_trampoline(const struct bpf_prog *prog); #else static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 04a8d5bea552..6ca0550c4b24 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -833,12 +833,11 @@ static int __init bpf_jit_charge_init(void) } pure_initcall(bpf_jit_charge_init); -int bpf_jit_charge_modmem(u32 pages) +int bpf_jit_charge_modmem(u32 size) { - if (atomic_long_add_return(pages, &bpf_jit_current) > - (bpf_jit_limit >> PAGE_SHIFT)) { + if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) { if (!bpf_capable()) { - atomic_long_sub(pages, &bpf_jit_current); + atomic_long_sub(size, &bpf_jit_current); return -EPERM; } } @@ -846,9 +845,9 @@ int bpf_jit_charge_modmem(u32 pages) return 0; } -void bpf_jit_uncharge_modmem(u32 pages) +void bpf_jit_uncharge_modmem(u32 size) { - atomic_long_sub(pages, &bpf_jit_current); + atomic_long_sub(size, &bpf_jit_current); } void *__weak bpf_jit_alloc_exec(unsigned long size) @@ -879,11 +878,11 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); pages = size / PAGE_SIZE; - if (bpf_jit_charge_modmem(pages)) + if (bpf_jit_charge_modmem(size)) return NULL; hdr = bpf_jit_alloc_exec(size); if (!hdr) { - bpf_jit_uncharge_modmem(pages); + bpf_jit_uncharge_modmem(size); return NULL; } @@ -906,7 +905,7 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr) u32 pages = hdr->pages; bpf_jit_free_exec(hdr); - bpf_jit_uncharge_modmem(pages); + bpf_jit_uncharge_modmem(pages << PAGE_SHIFT); } /* This symbol is only overridden by archs that have different diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index
4b6974a195c1..e76a488c09c3 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -213,7 +213,7 @@ static void __bpf_tramp_image_put_deferred(struct work_struct *work) im = container_of(work, struct bpf_tramp_image, work); bpf_image_ksym_del(&im->ksym); bpf_jit_free_exec(im->image); - bpf_jit_uncharge_modmem(1); + bpf_jit_uncharge_modmem(PAGE_SIZE); percpu_ref_exit(&im->pcref); kfree_rcu(im, rcu); } @@ -310,7 +310,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx) if (!im) goto out; - err = bpf_jit_charge_modmem(1); + err = bpf_jit_charge_modmem(PAGE_SIZE); if (err) goto out_free_im; @@ -332,7 +332,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx) out_free_image: bpf_jit_free_exec(im->image); out_uncharge: - bpf_jit_uncharge_modmem(1); + bpf_jit_uncharge_modmem(PAGE_SIZE); out_free_im: kfree(im); out: -- cgit v1.2.3-71-gd317 From ed2d9e1a26cca963ff5ed3b76326d70f7d8201a9 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 4 Feb 2022 10:57:36 -0800 Subject: bpf: Use size instead of pages in bpf_binary_header This is necessary to charge sub-page memory for the BPF program. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220204185742.271030-4-song@kernel.org --- include/linux/filter.h | 6 +++--- kernel/bpf/core.c | 11 +++++------ 2 files changed, 8 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index d23e999dc032..5855eb474c62 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -548,7 +548,7 @@ struct sock_fprog_kern { #define BPF_IMAGE_ALIGNMENT 8 struct bpf_binary_header { - u32 pages; + u32 size; u8 image[] __aligned(BPF_IMAGE_ALIGNMENT); }; @@ -886,8 +886,8 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp) static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { set_vm_flush_reset_perms(hdr); - set_memory_ro((unsigned long)hdr, hdr->pages); - set_memory_x((unsigned long)hdr, hdr->pages); + set_memory_ro((unsigned long)hdr, hdr->size >> PAGE_SHIFT); + set_memory_x((unsigned long)hdr, hdr->size >> PAGE_SHIFT); } static inline struct bpf_binary_header * diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 6ca0550c4b24..14199228a6f0 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -543,7 +543,7 @@ bpf_prog_ksym_set_addr(struct bpf_prog *prog) WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); prog->aux->ksym.start = (unsigned long) prog->bpf_func; - prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE; + prog->aux->ksym.end = addr + hdr->size; } static void @@ -866,7 +866,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, bpf_jit_fill_hole_t bpf_fill_ill_insns) { struct bpf_binary_header *hdr; - u32 size, hole, start, pages; + u32 size, hole, start; WARN_ON_ONCE(!is_power_of_2(alignment) || alignment > BPF_IMAGE_ALIGNMENT); @@ -876,7 +876,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, * random section of illegal instructions. */ size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); - pages = size / PAGE_SIZE; if (bpf_jit_charge_modmem(size)) return NULL; @@ -889,7 +888,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, /* Fill space with illegal/arch-dep instructions.
*/ bpf_fill_ill_insns(hdr, size); - hdr->pages = pages; + hdr->size = size; hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), PAGE_SIZE - sizeof(*hdr)); start = (get_random_int() % hole) & ~(alignment - 1); @@ -902,10 +901,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, void bpf_jit_binary_free(struct bpf_binary_header *hdr) { - u32 pages = hdr->pages; + u32 size = hdr->size; bpf_jit_free_exec(hdr); - bpf_jit_uncharge_modmem(pages << PAGE_SHIFT); + bpf_jit_uncharge_modmem(size); } /* This symbol is only overridden by archs that have different -- cgit v1.2.3-71-gd317 From ebc1415d9b4f043cef5a1fb002ec316e32167e7a Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 4 Feb 2022 10:57:39 -0800 Subject: bpf: Introduce bpf_arch_text_copy This will be used to copy JITed text to RO protected module memory. On x86, bpf_arch_text_copy is implemented with text_poke_copy. bpf_arch_text_copy returns a pointer to dst on success, and ERR_PTR(errno) on errors. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220204185742.271030-7-song@kernel.org --- arch/x86/net/bpf_jit_comp.c | 7 +++++++ include/linux/bpf.h | 2 ++ kernel/bpf/core.c | 5 +++++ 3 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 36f6fc3e6e69..c13d148f7396 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -2412,3 +2412,10 @@ bool bpf_jit_supports_kfunc_call(void) { return true; } + +void *bpf_arch_text_copy(void *dst, void *src, size_t len) +{ + if (text_poke_copy(dst, src, len) == NULL) + return ERR_PTR(-EINVAL); + return dst; +} diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 366f88afd56b..ea0d7fd4a410 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2362,6 +2362,8 @@ enum bpf_text_poke_type { int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, void *addr1, void *addr2); +void *bpf_arch_text_copy(void *dst, void *src, size_t len); + struct btf_id_set; bool btf_id_set_contains(const struct btf_id_set *set, u32 id); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index e3fe53df0a71..a5ec480f9862 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2440,6 +2440,11 @@ int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, return -ENOTSUPP; } +void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len) +{ + return ERR_PTR(-ENOTSUPP); +} + DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key); EXPORT_SYMBOL(bpf_stats_enabled_key); -- cgit v1.2.3-71-gd317 From 33c9805860e584b194199cab1a1e81f4e6395408 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 4 Feb 2022 10:57:41 -0800 Subject: bpf: Introduce bpf_jit_binary_pack_[alloc|finalize|free] This is the jit binary allocator built on top of bpf_prog_pack. bpf_prog_pack allocates RO memory, which cannot be used directly by the JIT engine. Therefore, a temporary rw buffer is allocated for the JIT engine. Once JIT is done, bpf_jit_binary_pack_finalize is used to copy the program to the RO memory. bpf_jit_binary_pack_alloc reserves 16 bytes of extra space for illegal instructions, which is smaller than the 128 bytes of space reserved by bpf_jit_binary_alloc. This change is necessary for bpf_jit_binary_hdr to find the correct header. Also, the flag use_bpf_prog_pack is added to differentiate a program allocated by bpf_jit_binary_pack_alloc.
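For illustration, a JIT engine would be expected to drive the new helpers roughly as follows. This is a sketch only: proglen and alignment come from the arch JIT, jit_fill_hole stands in for the arch's bpf_jit_fill_hole_t callback, and error handling is trimmed to the minimum:

	struct bpf_binary_header *ro_header, *rw_header;
	u8 *ro_image, *rw_image;

	ro_header = bpf_jit_binary_pack_alloc(proglen, &ro_image, alignment,
					      &rw_header, &rw_image,
					      jit_fill_hole);
	if (!ro_header)
		return NULL;

	/* Emit instructions into rw_image, but compute all addresses and
	 * offsets against ro_image, where the program will actually run.
	 */

	if (bpf_jit_binary_pack_finalize(prog, ro_header, rw_header))
		return NULL; /* finalize already released both buffers */

	prog->bpf_func = (void *)ro_image;

If the JIT itself fails before finalize, bpf_jit_binary_pack_free(ro_header, rw_header) releases both buffers and uncharges the memory.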
Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220204185742.271030-9-song@kernel.org --- include/linux/bpf.h | 1 + include/linux/filter.h | 21 +++++----- kernel/bpf/core.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 120 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ea0d7fd4a410..2fc7e5c5ef41 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -953,6 +953,7 @@ struct bpf_prog_aux { bool sleepable; bool tail_call_reachable; bool xdp_has_frags; + bool use_bpf_prog_pack; struct hlist_node tramp_hlist; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; diff --git a/include/linux/filter.h b/include/linux/filter.h index 5855eb474c62..1cb1af917617 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -890,15 +890,6 @@ static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) set_memory_x((unsigned long)hdr, hdr->size >> PAGE_SHIFT); } -static inline struct bpf_binary_header * -bpf_jit_binary_hdr(const struct bpf_prog *fp) -{ - unsigned long real_start = (unsigned long)fp->bpf_func; - unsigned long addr = real_start & PAGE_MASK; - - return (void *)addr; -} - int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); static inline int sk_filter(struct sock *sk, struct sk_buff *skb) { @@ -1068,6 +1059,18 @@ void *bpf_jit_alloc_exec(unsigned long size); void bpf_jit_free_exec(void *addr); void bpf_jit_free(struct bpf_prog *fp); +struct bpf_binary_header * +bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image, + unsigned int alignment, + struct bpf_binary_header **rw_hdr, + u8 **rw_image, + bpf_jit_fill_hole_t bpf_fill_ill_insns); +int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, + struct bpf_binary_header *ro_header, + struct bpf_binary_header *rw_header); +void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, + struct bpf_binary_header *rw_header); + int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, struct bpf_jit_poke_descriptor *poke); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7ae590897b73..306aa63fa58e 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1031,6 +1031,109 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr) bpf_jit_uncharge_modmem(size); } +/* Allocate jit binary from bpf_prog_pack allocator. + * Since the allocated memory is RO+X, the JIT engine cannot write directly + * to the memory. To solve this problem, a RW buffer is also allocated + * at the same time. The JIT engine should calculate offsets based on the + * RO memory address, but write the JITed program to the RW buffer. Once the + * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies + * the JITed program to the RO memory.
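+ *
+ * Returns NULL if charging the memory or either allocation fails; in that
+ * case, everything acquired up to that point has already been released.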
+ */ +struct bpf_binary_header * +bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr, + unsigned int alignment, + struct bpf_binary_header **rw_header, + u8 **rw_image, + bpf_jit_fill_hole_t bpf_fill_ill_insns) +{ + struct bpf_binary_header *ro_header; + u32 size, hole, start; + + WARN_ON_ONCE(!is_power_of_2(alignment) || + alignment > BPF_IMAGE_ALIGNMENT); + + /* add 16 bytes for a random section of illegal instructions */ + size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE); + + if (bpf_jit_charge_modmem(size)) + return NULL; + ro_header = bpf_prog_pack_alloc(size); + if (!ro_header) { + bpf_jit_uncharge_modmem(size); + return NULL; + } + + *rw_header = kvmalloc(size, GFP_KERNEL); + if (!*rw_header) { + bpf_prog_pack_free(ro_header); + bpf_jit_uncharge_modmem(size); + return NULL; + } + + /* Fill space with illegal/arch-dep instructions. */ + bpf_fill_ill_insns(*rw_header, size); + (*rw_header)->size = size; + + hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)), + BPF_PROG_CHUNK_SIZE - sizeof(*ro_header)); + start = (get_random_int() % hole) & ~(alignment - 1); + + *image_ptr = &ro_header->image[start]; + *rw_image = &(*rw_header)->image[start]; + + return ro_header; +} + +/* Copy JITed text from rw_header to its final location, the ro_header. */ +int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, + struct bpf_binary_header *ro_header, + struct bpf_binary_header *rw_header) +{ + void *ptr; + + ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size); + + kvfree(rw_header); + + if (IS_ERR(ptr)) { + bpf_prog_pack_free(ro_header); + return PTR_ERR(ptr); + } + prog->aux->use_bpf_prog_pack = true; + return 0; +} + +/* bpf_jit_binary_pack_free is called in two different scenarios: + * 1) when the program is freed after bpf_jit_binary_pack_finalize(); + * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize). + * For case 2), we need to free both the RO memory and the RW buffer. + * Also, ro_header->size in 2) is not properly set yet, so rw_header->size + * is used for uncharge. + */ +void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, + struct bpf_binary_header *rw_header) +{ + u32 size = rw_header ? rw_header->size : ro_header->size; + + bpf_prog_pack_free(ro_header); + kvfree(rw_header); + bpf_jit_uncharge_modmem(size); +} + +static inline struct bpf_binary_header * +bpf_jit_binary_hdr(const struct bpf_prog *fp) +{ + unsigned long real_start = (unsigned long)fp->bpf_func; + unsigned long addr; + + if (fp->aux->use_bpf_prog_pack) + addr = real_start & BPF_PROG_CHUNK_MASK; + else + addr = real_start & PAGE_MASK; + + return (void *)addr; +} + /* This symbol is only overridden by archs that have different * requirements than the usual eBPF JITs, f.e. when they only * implement cBPF JIT, do not set images read-only, etc.
@@ -1040,7 +1143,10 @@ void __weak bpf_jit_free(struct bpf_prog *fp) if (fp->jited) { struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); - bpf_jit_binary_free(hdr); + if (fp->aux->use_bpf_prog_pack) + bpf_jit_binary_pack_free(hdr, NULL /* rw_buffer */); + else + bpf_jit_binary_free(hdr); WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); } -- cgit v1.2.3-71-gd317 From 976b6d97c62347df3e686f60a5f455bb8ed6ea23 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 19 Jan 2022 11:17:32 +0100 Subject: dma-buf: consolidate dma_fence subclass checking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Consolidate the wrapper functions to check for dma_fence subclasses in the dma_fence header. This makes it easier to document and also check the different requirements for fence containers in the subclasses. Signed-off-by: Christian König Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20220204100429.2049-2-christian.koenig@amd.com --- include/linux/dma-fence-array.h | 15 +-------------- include/linux/dma-fence-chain.h | 3 +-- include/linux/dma-fence.h | 38 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index 303dd712220f..fec374f69e12 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -45,19 +45,6 @@ struct dma_fence_array { struct irq_work work; }; -extern const struct dma_fence_ops dma_fence_array_ops; - -/** - * dma_fence_is_array - check if a fence is from the array subclass - * @fence: fence to test - * - * Return true if it is a dma_fence_array and false otherwise. - */ -static inline bool dma_fence_is_array(struct dma_fence *fence) -{ - return fence->ops == &dma_fence_array_ops; -} - /** * to_dma_fence_array - cast a fence to a dma_fence_array * @fence: fence to cast to a dma_fence_array @@ -68,7 +55,7 @@ static inline bool dma_fence_is_array(struct dma_fence *fence) static inline struct dma_fence_array * to_dma_fence_array(struct dma_fence *fence) { - if (fence->ops != &dma_fence_array_ops) + if (!fence || !dma_fence_is_array(fence)) return NULL; return container_of(fence, struct dma_fence_array, base); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index 54fe3443fd2c..ee906b659694 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -49,7 +49,6 @@ struct dma_fence_chain { spinlock_t lock; }; -extern const struct dma_fence_ops dma_fence_chain_ops; /** * to_dma_fence_chain - cast a fence to a dma_fence_chain @@ -61,7 +60,7 @@ extern const struct dma_fence_ops dma_fence_chain_ops; static inline struct dma_fence_chain * to_dma_fence_chain(struct dma_fence *fence) { - if (!fence || fence->ops != &dma_fence_chain_ops) + if (!fence || !dma_fence_is_chain(fence)) return NULL; return container_of(fence, struct dma_fence_chain, base); diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 1ea691753bd3..775cdc0b4f24 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -587,4 +587,42 @@ struct dma_fence *dma_fence_get_stub(void); struct dma_fence *dma_fence_allocate_private_stub(void); u64 dma_fence_context_alloc(unsigned num); +extern const struct dma_fence_ops dma_fence_array_ops; +extern const struct dma_fence_ops dma_fence_chain_ops; + +/** + * dma_fence_is_array - check if a fence is from the array subclass + * @fence: the fence to
test + * + * Return true if it is a dma_fence_array and false otherwise. + */ +static inline bool dma_fence_is_array(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_array_ops; +} + +/** + * dma_fence_is_chain - check if a fence is from the chain subclass + * @fence: the fence to test + * + * Return true if it is a dma_fence_chain and false otherwise. + */ +static inline bool dma_fence_is_chain(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_chain_ops; +} + +/** + * dma_fence_is_container - check if a fence is a container for other fences + * @fence: the fence to test + * + * Return true if this fence is a container for other fences, false otherwise. + * This is important since we can't build up large fence structures or otherwise + * we run into recursion during operation on those fences. + */ +static inline bool dma_fence_is_container(struct dma_fence *fence) +{ + return dma_fence_is_array(fence) || dma_fence_is_chain(fence); +} + #endif /* __LINUX_DMA_FENCE_H */ -- cgit v1.2.3-71-gd317 From 18f5fad275efef015226ee4f90eae34d8f44aa5e Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 20 Jan 2022 11:42:40 +0100 Subject: dma-buf: add dma_fence_chain_contained helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's a recurring pattern that we need to extract the fence from a dma_fence_chain object. Add a helper for this. Signed-off-by: Christian König Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20220204100429.2049-6-christian.koenig@amd.com --- drivers/dma-buf/dma-fence-chain.c | 6 ++---- include/linux/dma-fence-chain.h | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index 084c6927b735..06f8ef97c6e8 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -148,8 +148,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) dma_fence_get(&head->base); dma_fence_chain_for_each(fence, &head->base) { - struct dma_fence_chain *chain = to_dma_fence_chain(fence); - struct dma_fence *f = chain ? chain->fence : fence; + struct dma_fence *f = dma_fence_chain_contained(fence); dma_fence_get(f); if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) { @@ -165,8 +164,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) static bool dma_fence_chain_signaled(struct dma_fence *fence) { dma_fence_chain_for_each(fence, fence) { - struct dma_fence_chain *chain = to_dma_fence_chain(fence); - struct dma_fence *f = chain ? chain->fence : fence; + struct dma_fence *f = dma_fence_chain_contained(fence); if (!dma_fence_is_signaled(f)) { dma_fence_put(fence); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index ee906b659694..10d51bcdf7b7 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -66,6 +66,21 @@ to_dma_fence_chain(struct dma_fence *fence) return container_of(fence, struct dma_fence_chain, base); } +/** + * dma_fence_chain_contained - return the contained fence + * @fence: the fence to test + * + * If the fence is a dma_fence_chain the function returns the fence contained + * inside the chain object, otherwise it returns the fence itself. + */ +static inline struct dma_fence * +dma_fence_chain_contained(struct dma_fence *fence) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + + return chain ?
chain->fence : fence; +} + /** * dma_fence_chain_alloc * -- cgit v1.2.3-71-gd317 From 5913eb45d036288babb67e97b210233537ef7b90 Mon Sep 17 00:00:00 2001 From: Alistair Francis Date: Mon, 24 Jan 2022 22:10:04 +1000 Subject: mfd: simple-mfd-i2c: Enable support for the silergy,sy7636a Signed-off-by: Alistair Francis Signed-off-by: Lee Jones --- drivers/mfd/simple-mfd-i2c.c | 11 +++++++++++ include/linux/mfd/sy7636a.h | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 include/linux/mfd/sy7636a.h (limited to 'include/linux') diff --git a/drivers/mfd/simple-mfd-i2c.c b/drivers/mfd/simple-mfd-i2c.c index 51536691ad9d..f4c8fc3ee463 100644 --- a/drivers/mfd/simple-mfd-i2c.c +++ b/drivers/mfd/simple-mfd-i2c.c @@ -62,8 +62,19 @@ static int simple_mfd_i2c_probe(struct i2c_client *i2c) return ret; } +static const struct mfd_cell sy7636a_cells[] = { + { .name = "sy7636a-regulator", }, + { .name = "sy7636a-temperature", }, +}; + +static const struct simple_mfd_data silergy_sy7636a = { + .mfd_cell = sy7636a_cells, + .mfd_cell_size = ARRAY_SIZE(sy7636a_cells), +}; + static const struct of_device_id simple_mfd_i2c_of_match[] = { { .compatible = "kontron,sl28cpld" }, + { .compatible = "silergy,sy7636a", .data = &silergy_sy7636a}, {} }; MODULE_DEVICE_TABLE(of, simple_mfd_i2c_of_match); diff --git a/include/linux/mfd/sy7636a.h b/include/linux/mfd/sy7636a.h new file mode 100644 index 000000000000..22f03b2f851e --- /dev/null +++ b/include/linux/mfd/sy7636a.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Functions to access the SY7636A power management chip. + * + * Copyright (C) 2021 reMarkable AS - http://www.remarkable.com/ + */ + +#ifndef __MFD_SY7636A_H +#define __MFD_SY7636A_H + +#define SY7636A_REG_OPERATION_MODE_CRL 0x00 +/* It is set if a gpio is used to control the regulator */ +#define SY7636A_OPERATION_MODE_CRL_VCOMCTL BIT(6) +#define SY7636A_OPERATION_MODE_CRL_ONOFF BIT(7) +#define SY7636A_REG_VCOM_ADJUST_CTRL_L 0x01 +#define SY7636A_REG_VCOM_ADJUST_CTRL_H 0x02 +#define SY7636A_REG_VCOM_ADJUST_CTRL_MASK 0x01ff +#define SY7636A_REG_VLDO_VOLTAGE_ADJULST_CTRL 0x03 +#define SY7636A_REG_POWER_ON_DELAY_TIME 0x06 +#define SY7636A_REG_FAULT_FLAG 0x07 +#define SY7636A_FAULT_FLAG_PG BIT(0) +#define SY7636A_REG_TERMISTOR_READOUT 0x08 + +#define SY7636A_REG_MAX 0x08 + +#define VCOM_ADJUST_CTRL_MASK 0x1ff +// Used to shift the high byte +#define VCOM_ADJUST_CTRL_SHIFT 8 +// Used to scale from VCOM_ADJUST_CTRL to mV +#define VCOM_ADJUST_CTRL_SCAL 10000 + +#define FAULT_FLAG_SHIFT 1 + +#endif /* __MFD_SY7636A_H */ -- cgit v1.2.3-71-gd317 From fac608138c6136126faadafa5554cc0bbabf3c44 Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Mon, 7 Feb 2022 02:27:18 -0800 Subject: VMCI: dma dg: whitespace formatting change for vmci register defines Update formatting of existing register defines in preparation for adding additional register definitions for the VMCI device. Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/20220207102725.2742-2-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- include/linux/vmw_vmci_defs.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index e36cb114c188..9911ecfc18ba 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -12,15 +12,15 @@ #include /* Register offsets.
*/ -#define VMCI_STATUS_ADDR 0x00 -#define VMCI_CONTROL_ADDR 0x04 -#define VMCI_ICR_ADDR 0x08 -#define VMCI_IMR_ADDR 0x0c -#define VMCI_DATA_OUT_ADDR 0x10 -#define VMCI_DATA_IN_ADDR 0x14 -#define VMCI_CAPS_ADDR 0x18 -#define VMCI_RESULT_LOW_ADDR 0x1c -#define VMCI_RESULT_HIGH_ADDR 0x20 +#define VMCI_STATUS_ADDR 0x00 +#define VMCI_CONTROL_ADDR 0x04 +#define VMCI_ICR_ADDR 0x08 +#define VMCI_IMR_ADDR 0x0c +#define VMCI_DATA_OUT_ADDR 0x10 +#define VMCI_DATA_IN_ADDR 0x14 +#define VMCI_CAPS_ADDR 0x18 +#define VMCI_RESULT_LOW_ADDR 0x1c +#define VMCI_RESULT_HIGH_ADDR 0x20 /* Max number of devices. */ #define VMCI_MAX_DEVICES 1 -- cgit v1.2.3-71-gd317 From e283a0e8b7ea83915e988ed059384af166b444c0 Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Mon, 7 Feb 2022 02:27:19 -0800 Subject: VMCI: dma dg: add MMIO access to registers Detect the support for MMIO access through examination of the length of the region requested in BAR1. If it is 256KB, the VMCI device supports MMIO access to registers. If MMIO access is supported, map the area of the region used for MMIO access (64KB size at offset 128KB). Add wrapper functions for 32-bit register accesses through either MMIO or IO ports, based on device configuration. Sending and receiving datagrams through iowrite8_rep/ioread8_rep is left unchanged for now, and will be addressed in a later change. Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/20220207102725.2742-3-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_guest.c | 67 +++++++++++++++++++++++++++----------- include/linux/vmw_vmci_defs.h | 12 +++++++ 2 files changed, 60 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 1018dc77269d..d30d66258e52 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -45,6 +45,7 @@ static u32 vm_context_id = VMCI_INVALID_ID; struct vmci_guest_device { struct device *dev; /* PCI device we are attached to */ void __iomem *iobase; + void __iomem *mmio_base; bool exclusive_vectors; @@ -89,6 +90,21 @@ u32 vmci_get_vm_context_id(void) return vm_context_id; } +static unsigned int vmci_read_reg(struct vmci_guest_device *dev, u32 reg) +{ + if (dev->mmio_base != NULL) + return readl(dev->mmio_base + reg); + return ioread32(dev->iobase + reg); +} + +static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg) +{ + if (dev->mmio_base != NULL) + writel(val, dev->mmio_base + reg); + else + iowrite32(val, dev->iobase + reg); +} + /* * VM to hypervisor call mechanism. We use the standard VMware naming * convention since shared code is calling this function as well. @@ -116,7 +132,7 @@ int vmci_send_datagram(struct vmci_datagram *dg) if (vmci_dev_g) { iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR, dg, VMCI_DG_SIZE(dg)); - result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR); + result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR); } else { result = VMCI_ERROR_UNAVAILABLE; } @@ -384,7 +400,7 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev) unsigned int icr; /* Acknowledge interrupt and determine what needs doing.
*/ - icr = ioread32(dev->iobase + VMCI_ICR_ADDR); + icr = vmci_read_reg(dev, VMCI_ICR_ADDR); if (icr == 0 || icr == ~0) return IRQ_NONE; @@ -429,7 +445,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, const struct pci_device_id *id) { struct vmci_guest_device *vmci_dev; - void __iomem *iobase; + void __iomem *iobase = NULL; + void __iomem *mmio_base = NULL; unsigned int capabilities; unsigned int caps_in_use; unsigned long cmd; @@ -445,16 +462,29 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, return error; } - error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME); - if (error) { - dev_err(&pdev->dev, "Failed to reserve/map IO regions\n"); - return error; - } + /* + * The VMCI device with mmio access to registers requests 256KB + * for BAR1. If present, the driver will use the new VMCI device + * functionality for register access and datagram send/recv. + */ - iobase = pcim_iomap_table(pdev)[0]; + if (pci_resource_len(pdev, 1) == VMCI_WITH_MMIO_ACCESS_BAR_SIZE) { + dev_info(&pdev->dev, "MMIO register access is available\n"); + mmio_base = pci_iomap_range(pdev, 1, VMCI_MMIO_ACCESS_OFFSET, + VMCI_MMIO_ACCESS_SIZE); + /* If the map fails, we fall back to IOIO access. */ + if (!mmio_base) + dev_warn(&pdev->dev, "Failed to map MMIO register access\n"); + } - dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n", - (unsigned long)iobase, pdev->irq); + if (!mmio_base) { + error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME); + if (error) { + dev_err(&pdev->dev, "Failed to reserve/map IO regions\n"); + return error; + } + iobase = pcim_iomap_table(pdev)[0]; + } vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL); if (!vmci_dev) { @@ -466,6 +496,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, vmci_dev->dev = &pdev->dev; vmci_dev->exclusive_vectors = false; vmci_dev->iobase = iobase; + vmci_dev->mmio_base = mmio_base; tasklet_init(&vmci_dev->datagram_tasklet, vmci_dispatch_dgs, (unsigned long)vmci_dev); @@ -490,7 +521,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, * * Right now, we need datagrams. There are no fallbacks. */ - capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR); + capabilities = vmci_read_reg(vmci_dev, VMCI_CAPS_ADDR); if (!(capabilities & VMCI_CAPS_DATAGRAM)) { dev_err(&pdev->dev, "Device does not support datagrams\n"); error = -ENXIO; @@ -534,7 +565,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use); /* Let the host know which capabilities we intend to use. */ - iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR); + vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR); /* Set up global device so that we can start sending datagrams */ spin_lock_irq(&vmci_dev_spinlock); @@ -630,11 +661,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, cmd = VMCI_IMR_DATAGRAM; if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) cmd |= VMCI_IMR_NOTIFICATION; - iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR); + vmci_write_reg(vmci_dev, cmd, VMCI_IMR_ADDR); /* Enable interrupts.
*/ - iowrite32(VMCI_CONTROL_INT_ENABLE, - vmci_dev->iobase + VMCI_CONTROL_ADDR); + vmci_write_reg(vmci_dev, VMCI_CONTROL_INT_ENABLE, VMCI_CONTROL_ADDR); pci_set_drvdata(pdev, vmci_dev); @@ -657,8 +687,7 @@ err_disable_msi: err_remove_bitmap: if (vmci_dev->notification_bitmap) { - iowrite32(VMCI_CONTROL_RESET, - vmci_dev->iobase + VMCI_CONTROL_ADDR); + vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR); dma_free_coherent(&pdev->dev, PAGE_SIZE, vmci_dev->notification_bitmap, vmci_dev->notification_base); @@ -700,7 +729,7 @@ static void vmci_guest_remove_device(struct pci_dev *pdev) spin_unlock_irq(&vmci_dev_spinlock); dev_dbg(&pdev->dev, "Resetting vmci device\n"); - iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR); + vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR); /* * Free IRQ and then disable MSI/MSI-X as appropriate. For diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 9911ecfc18ba..8fc00e2685cf 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -82,6 +82,18 @@ enum { */ #define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024)) +/* + * The version of the VMCI device that supports MMIO access to registers + * requests 256KB for BAR1 whereas the version of VMCI that supports + * MSI/MSI-X only requests 8KB. The layout of the larger 256KB region is: + * - the first 128KB are used for MSI/MSI-X. + * - the following 64KB are used for MMIO register access. + * - the remaining 64KB are unused. + */ +#define VMCI_WITH_MMIO_ACCESS_BAR_SIZE ((size_t)(256 * 1024)) +#define VMCI_MMIO_ACCESS_OFFSET ((size_t)(128 * 1024)) +#define VMCI_MMIO_ACCESS_SIZE ((size_t)(64 * 1024)) + /* * We have a fixed set of resource IDs available in the VMX. * This allows us to have a very simple implementation since we statically -- cgit v1.2.3-71-gd317 From eed2298d936087a1c85e0fa6f7170028e4f4fded Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Mon, 7 Feb 2022 02:27:20 -0800 Subject: VMCI: dma dg: detect DMA datagram capability Detect the VMCI DMA datagram capability, and if present, ack it to the device. Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/20220207102725.2742-4-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_guest.c | 11 +++++++++++ include/linux/vmw_vmci_defs.h | 1 + 2 files changed, 12 insertions(+) (limited to 'include/linux') diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index d30d66258e52..b93afe7f7119 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -562,6 +562,17 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, } } + if (mmio_base != NULL) { + if (capabilities & VMCI_CAPS_DMA_DATAGRAM) { + caps_in_use |= VMCI_CAPS_DMA_DATAGRAM; + } else { + dev_err(&pdev->dev, + "Missing capability: VMCI_CAPS_DMA_DATAGRAM\n"); + error = -ENXIO; + goto err_free_data_buffer; + } + } + dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use); /* Let the host know which capabilities we intend to use. */ diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 8fc00e2685cf..1ce2cffdc3ae 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -39,6 +39,7 @@ #define VMCI_CAPS_DATAGRAM BIT(2) #define VMCI_CAPS_NOTIFICATIONS BIT(3) #define VMCI_CAPS_PPN64 BIT(4) +#define VMCI_CAPS_DMA_DATAGRAM BIT(5) /* Interrupt Cause register bits. 
*/ #define VMCI_ICR_DATAGRAM BIT(0) -- cgit v1.2.3-71-gd317 From 8cb520bea1470ca205980fbf030ed1f472f4af2f Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Mon, 7 Feb 2022 02:27:21 -0800 Subject: VMCI: dma dg: set OS page size Tell the device the page size used by the OS. Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/20220207102725.2742-5-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_guest.c | 4 ++++ include/linux/vmw_vmci_defs.h | 1 + 2 files changed, 5 insertions(+) (limited to 'include/linux') diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index b93afe7f7119..ced187e7ac08 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -578,6 +578,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, /* Let the host know which capabilities we intend to use. */ vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR); + /* Let the device know the size for pages passed down. */ + if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) + vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT); + /* Set up global device so that we can start sending datagrams */ spin_lock_irq(&vmci_dev_spinlock); vmci_dev_g = vmci_dev; diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 1ce2cffdc3ae..4167779469fd 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -21,6 +21,7 @@ #define VMCI_CAPS_ADDR 0x18 #define VMCI_RESULT_LOW_ADDR 0x1c #define VMCI_RESULT_HIGH_ADDR 0x20 +#define VMCI_GUEST_PAGE_SHIFT 0x34 /* Max number of devices. */ #define VMCI_MAX_DEVICES 1 -- cgit v1.2.3-71-gd317 From cc68f2177fcbfe2dbe5e9514789b96ba5995ec1e Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Mon, 7 Feb 2022 02:27:22 -0800 Subject: VMCI: dma dg: register dummy IRQ handlers for DMA datagrams Register dummy interrupt handlers for DMA datagrams in preparation for DMA datagram receive operations. Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/20220207102725.2742-6-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_guest.c | 42 +++++++++++++++++++++++++++++++++++--- include/linux/vmw_vmci_defs.h | 14 +++++++++++-- 2 files changed, 51 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index ced187e7ac08..acef19c562b3 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -414,6 +414,9 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev) icr &= ~VMCI_ICR_NOTIFICATION; } + if (icr & VMCI_ICR_DMA_DATAGRAM) + icr &= ~VMCI_ICR_DMA_DATAGRAM; + if (icr != 0) dev_warn(dev->dev, "Ignoring unknown interrupt cause (%d)\n", @@ -438,6 +441,16 @@ static irqreturn_t vmci_interrupt_bm(int irq, void *_dev) return IRQ_HANDLED; } +/* + * Interrupt handler for MSI-X interrupt vector VMCI_INTR_DMA_DATAGRAM, + * which is for the completion of a DMA datagram send or receive operation. + * Will only get called if we are using MSI-X with exclusive vectors. + */ +static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev) +{ + return IRQ_HANDLED; +} + /* * Most of the initialization at module load time is done here. 
*/ @@ -447,6 +460,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, struct vmci_guest_device *vmci_dev; void __iomem *iobase = NULL; void __iomem *mmio_base = NULL; + unsigned int num_irq_vectors; unsigned int capabilities; unsigned int caps_in_use; unsigned long cmd; @@ -627,8 +641,12 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, * Enable interrupts. Try MSI-X first, then MSI, and then fallback on * legacy interrupts. */ - error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, - PCI_IRQ_MSIX); + if (vmci_dev->mmio_base != NULL) + num_irq_vectors = VMCI_MAX_INTRS; + else + num_irq_vectors = VMCI_MAX_INTRS_NOTIFICATION; + error = pci_alloc_irq_vectors(pdev, num_irq_vectors, num_irq_vectors, + PCI_IRQ_MSIX); if (error < 0) { error = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); @@ -666,6 +684,17 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, pci_irq_vector(pdev, 1), error); goto err_free_irq; } + if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) { + error = request_irq(pci_irq_vector(pdev, 2), + vmci_interrupt_dma_datagram, + 0, KBUILD_MODNAME, vmci_dev); + if (error) { + dev_err(&pdev->dev, + "Failed to allocate irq %u: %d\n", + pci_irq_vector(pdev, 2), error); + goto err_free_bm_irq; + } + } } dev_dbg(&pdev->dev, "Registered device\n"); @@ -676,6 +705,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, cmd = VMCI_IMR_DATAGRAM; if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) cmd |= VMCI_IMR_NOTIFICATION; + if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) + cmd |= VMCI_IMR_DMA_DATAGRAM; vmci_write_reg(vmci_dev, cmd, VMCI_IMR_ADDR); /* Enable interrupts. */ @@ -686,6 +717,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, vmci_call_vsock_callback(false); return 0; +err_free_bm_irq: + free_irq(pci_irq_vector(pdev, 1), vmci_dev); err_free_irq: free_irq(pci_irq_vector(pdev, 0), vmci_dev); tasklet_kill(&vmci_dev->datagram_tasklet); @@ -751,8 +784,11 @@ static void vmci_guest_remove_device(struct pci_dev *pdev) * MSI-X, we might have multiple vectors, each with their own * IRQ, which we must free too. */ - if (vmci_dev->exclusive_vectors) + if (vmci_dev->exclusive_vectors) { free_irq(pci_irq_vector(pdev, 1), vmci_dev); + if (vmci_dev->mmio_base != NULL) + free_irq(pci_irq_vector(pdev, 2), vmci_dev); + } free_irq(pci_irq_vector(pdev, 0), vmci_dev); pci_free_irq_vectors(pdev); diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 4167779469fd..2b70c024dacb 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -45,13 +45,22 @@ /* Interrupt Cause register bits. */ #define VMCI_ICR_DATAGRAM BIT(0) #define VMCI_ICR_NOTIFICATION BIT(1) +#define VMCI_ICR_DMA_DATAGRAM BIT(2) /* Interrupt Mask register bits. */ #define VMCI_IMR_DATAGRAM BIT(0) #define VMCI_IMR_NOTIFICATION BIT(1) +#define VMCI_IMR_DMA_DATAGRAM BIT(2) -/* Maximum MSI/MSI-X interrupt vectors in the device. */ -#define VMCI_MAX_INTRS 2 +/* + * Maximum MSI/MSI-X interrupt vectors in the device. + * If VMCI_CAPS_DMA_DATAGRAM is supported by the device, + * VMCI_MAX_INTRS_DMA_DATAGRAM vectors are available, + * otherwise only VMCI_MAX_INTRS_NOTIFICATION. + */ +#define VMCI_MAX_INTRS_NOTIFICATION 2 +#define VMCI_MAX_INTRS_DMA_DATAGRAM 3 +#define VMCI_MAX_INTRS VMCI_MAX_INTRS_DMA_DATAGRAM /* * Supported interrupt vectors. 
There is one for each ICR value above, @@ -60,6 +69,7 @@ enum { VMCI_INTR_DATAGRAM = 0, VMCI_INTR_NOTIFICATION = 1, + VMCI_INTR_DMA_DATAGRAM = 2, }; /* -- cgit v1.2.3-71-gd317 From 5ee109828e73bbe4213c373988608d8f33e03d78 Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Mon, 7 Feb 2022 02:27:23 -0800 Subject: VMCI: dma dg: allocate send and receive buffers for DMA datagrams If DMA datagrams are used, allocate send and receive buffers in coherent DMA memory. This is done in preparation for the send and receive datagram operations, where the buffers are used for the exchange of data between driver and device. Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/20220207102725.2742-7-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_guest.c | 71 +++++++++++++++++++++++++++++++++----- include/linux/vmw_vmci_defs.h | 4 +++ 2 files changed, 66 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index acef19c562b3..36eade15ba87 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -31,6 +31,12 @@ #define VMCI_UTIL_NUM_RESOURCES 1 +/* + * Datagram buffers for DMA send/receive must accommodate at least + * a maximum sized datagram and the header. + */ +#define VMCI_DMA_DG_BUFFER_SIZE (VMCI_MAX_DG_SIZE + PAGE_SIZE) + static bool vmci_disable_msi; module_param_named(disable_msi, vmci_disable_msi, bool, 0); MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); @@ -53,6 +59,9 @@ struct vmci_guest_device { struct tasklet_struct bm_tasklet; void *data_buffer; + dma_addr_t data_buffer_base; + void *tx_buffer; + dma_addr_t tx_buffer_base; void *notification_bitmap; dma_addr_t notification_base; }; @@ -451,6 +460,24 @@ static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev) return IRQ_HANDLED; } +static void vmci_free_dg_buffers(struct vmci_guest_device *vmci_dev) +{ + if (vmci_dev->mmio_base != NULL) { + if (vmci_dev->tx_buffer != NULL) + dma_free_coherent(vmci_dev->dev, + VMCI_DMA_DG_BUFFER_SIZE, + vmci_dev->tx_buffer, + vmci_dev->tx_buffer_base); + if (vmci_dev->data_buffer != NULL) + dma_free_coherent(vmci_dev->dev, + VMCI_DMA_DG_BUFFER_SIZE, + vmci_dev->data_buffer, + vmci_dev->data_buffer_base); + } else { + vfree(vmci_dev->data_buffer); + } +} + /* * Most of the initialization at module load time is done here. */ @@ -517,11 +544,27 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, tasklet_init(&vmci_dev->bm_tasklet, vmci_process_bitmap, (unsigned long)vmci_dev); - vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE); + if (mmio_base != NULL) { + vmci_dev->tx_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE, + &vmci_dev->tx_buffer_base, + GFP_KERNEL); + if (!vmci_dev->tx_buffer) { + dev_err(&pdev->dev, + "Can't allocate memory for datagram tx buffer\n"); + return -ENOMEM; + } + + vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE, + &vmci_dev->data_buffer_base, + GFP_KERNEL); + } else { + vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE); + } if (!vmci_dev->data_buffer) { dev_err(&pdev->dev, "Can't allocate memory for datagram buffer\n"); - return -ENOMEM; + error = -ENOMEM; + goto err_free_data_buffers; } pci_set_master(pdev); /* To enable queue_pair functionality. 
*/ @@ -539,7 +582,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, if (!(capabilities & VMCI_CAPS_DATAGRAM)) { dev_err(&pdev->dev, "Device does not support datagrams\n"); error = -ENXIO; - goto err_free_data_buffer; + goto err_free_data_buffers; } caps_in_use = VMCI_CAPS_DATAGRAM; @@ -583,7 +626,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, dev_err(&pdev->dev, "Missing capability: VMCI_CAPS_DMA_DATAGRAM\n"); error = -ENXIO; - goto err_free_data_buffer; + goto err_free_data_buffers; } } @@ -592,10 +635,17 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, /* Let the host know which capabilities we intend to use. */ vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR); - /* Let the device know the size for pages passed down. */ - if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) + if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) { + /* Let the device know the size for pages passed down. */ vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT); + /* Configure the high order parts of the data in/out buffers. */ + vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->data_buffer_base), + VMCI_DATA_IN_HIGH_ADDR); + vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->tx_buffer_base), + VMCI_DATA_OUT_HIGH_ADDR); + } + /* Set up global device so that we can start sending datagrams */ spin_lock_irq(&vmci_dev_spinlock); vmci_dev_g = vmci_dev; @@ -747,8 +797,8 @@ err_remove_vmci_dev_g: vmci_dev_g = NULL; spin_unlock_irq(&vmci_dev_spinlock); -err_free_data_buffer: - vfree(vmci_dev->data_buffer); +err_free_data_buffers: + vmci_free_dg_buffers(vmci_dev); /* The rest are managed resources and will be freed by PCI core */ return error; @@ -806,7 +856,10 @@ static void vmci_guest_remove_device(struct pci_dev *pdev) vmci_dev->notification_base); } - vfree(vmci_dev->data_buffer); + vmci_free_dg_buffers(vmci_dev); + + if (vmci_dev->mmio_base != NULL) + pci_iounmap(pdev, vmci_dev->mmio_base); /* The rest are managed resources and will be freed by PCI core */ } diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 2b70c024dacb..8bc37d8244a8 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -21,6 +21,10 @@ #define VMCI_CAPS_ADDR 0x18 #define VMCI_RESULT_LOW_ADDR 0x1c #define VMCI_RESULT_HIGH_ADDR 0x20 +#define VMCI_DATA_OUT_LOW_ADDR 0x24 +#define VMCI_DATA_OUT_HIGH_ADDR 0x28 +#define VMCI_DATA_IN_LOW_ADDR 0x2c +#define VMCI_DATA_IN_HIGH_ADDR 0x30 #define VMCI_GUEST_PAGE_SHIFT 0x34 /* Max number of devices. */ -- cgit v1.2.3-71-gd317 From 22aa5c7f323022477b70e044eb00e6bfea9498e8 Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Mon, 7 Feb 2022 02:27:24 -0800 Subject: VMCI: dma dg: add support for DMA datagrams sends Use DMA based send operation from the transmit buffer instead of the iowrite8_rep based datagram send when DMA datagrams are supported. The outgoing datagram is sent as inline data in the VMCI transmit buffer. Once the header has been configured, the send is initiated by writing the lower 32 bit of the buffer base address to the VMCI_DATA_OUT_LOW_ADDR register. Only then will the device process the header and the datagram itself. Following that, the driver busy waits (it isn't possible to sleep on the send path) for the header busy flag to change - indicating that the send is complete. 
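As an aside for readers of this archive: the send sequence described above reduces to roughly the sketch below. It is an illustration, not part of the patch; the names are taken from this series, and error handling plus the VMCI_RESULT_LOW_ADDR check are omitted for brevity.

	static int dma_dg_send_sketch(struct vmci_guest_device *dev,
				      struct vmci_datagram *dg)
	{
		struct vmci_data_in_out_header *hdr = dev->tx_buffer;

		/* The outgoing datagram sits inline, right after the header. */
		memcpy(hdr + 1, dg, VMCI_DG_SIZE(dg));
		hdr->opcode = 0;		/* inline data, no sg list */
		hdr->size = VMCI_DG_SIZE(dg);
		hdr->busy = 1;

		/* Only this register write makes the device touch the buffer. */
		vmci_write_reg(dev, lower_32_bits(dev->tx_buffer_base),
			       VMCI_DATA_OUT_LOW_ADDR);

		/* Busy-wait: callers hold a spinlock and cannot sleep. */
		spin_until_cond(hdr->busy == 0);

		return (int)hdr->result;
	}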
Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/20220207102725.2742-8-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_guest.c | 45 ++++++++++++++++++++++++++++++++++++-- include/linux/vmw_vmci_defs.h | 34 ++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 36eade15ba87..bf524217914e 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -114,6 +115,47 @@ static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg) iowrite32(val, dev->iobase + reg); } +static int vmci_write_data(struct vmci_guest_device *dev, + struct vmci_datagram *dg) +{ + int result; + + if (dev->mmio_base != NULL) { + struct vmci_data_in_out_header *buffer_header = dev->tx_buffer; + u8 *dg_out_buffer = (u8 *)(buffer_header + 1); + + if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) + return VMCI_ERROR_INVALID_ARGS; + + /* + * Initialize send buffer with outgoing datagram + * and set up header for inline data. Device will + * not access buffer asynchronously - only after + * the write to VMCI_DATA_OUT_LOW_ADDR. + */ + memcpy(dg_out_buffer, dg, VMCI_DG_SIZE(dg)); + buffer_header->opcode = 0; + buffer_header->size = VMCI_DG_SIZE(dg); + buffer_header->busy = 1; + + vmci_write_reg(dev, lower_32_bits(dev->tx_buffer_base), + VMCI_DATA_OUT_LOW_ADDR); + + /* Caller holds a spinlock, so cannot block. */ + spin_until_cond(buffer_header->busy == 0); + + result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR); + if (result == VMCI_SUCCESS) + result = (int)buffer_header->result; + } else { + iowrite8_rep(dev->iobase + VMCI_DATA_OUT_ADDR, + dg, VMCI_DG_SIZE(dg)); + result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR); + } + + return result; +} + /* * VM to hypervisor call mechanism. We use the standard VMware naming * convention since shared code is calling this function as well. @@ -139,8 +181,7 @@ int vmci_send_datagram(struct vmci_datagram *dg) spin_lock_irqsave(&vmci_dev_spinlock, flags); if (vmci_dev_g) { - iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR, - dg, VMCI_DG_SIZE(dg)); + vmci_write_data(vmci_dev_g, dg); result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR); } else { result = VMCI_ERROR_UNAVAILABLE; diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 8bc37d8244a8..6fb663b36f72 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -110,6 +110,40 @@ enum { #define VMCI_MMIO_ACCESS_OFFSET ((size_t)(128 * 1024)) #define VMCI_MMIO_ACCESS_SIZE ((size_t)(64 * 1024)) +/* + * For VMCI devices supporting the VMCI_CAPS_DMA_DATAGRAM capability, the + * sending and receiving of datagrams can be performed using DMA to/from + * a driver allocated buffer. + * Sending and receiving will be handled as follows: + * - when sending datagrams, the driver initializes the buffer where the + * data part will refer to the outgoing VMCI datagram, sets the busy flag + * to 1 and writes the address of the buffer to VMCI_DATA_OUT_HIGH_ADDR + * and VMCI_DATA_OUT_LOW_ADDR. Writing to VMCI_DATA_OUT_LOW_ADDR triggers + * the device processing of the buffer. When the device has processed the + * buffer, it will write the result value to the buffer and then clear the + * busy flag. 
+ * - when receiving datagrams, the driver initializes the buffer where the + * data part will describe the receive buffer, clears the busy flag and + * writes the address of the buffer to VMCI_DATA_IN_HIGH_ADDR and + * VMCI_DATA_IN_LOW_ADDR. Writing to VMCI_DATA_IN_LOW_ADDR triggers the + * device processing of the buffer. The device will copy as many available + * datagrams into the buffer as possible, and then sets the busy flag. + * When the busy flag is set, the driver will process the datagrams in the + * buffer. + */ +struct vmci_data_in_out_header { + uint32_t busy; + uint32_t opcode; + uint32_t size; + uint32_t rsvd; + uint64_t result; +}; + +struct vmci_sg_elem { + uint64_t addr; + uint64_t size; +}; + /* * We have a fixed set of resource IDs available in the VMX. * This allows us to have a very simple implementation since we statically -- cgit v1.2.3-71-gd317 From 3301bc53358a0eb0a0db65fd7a513cd4cb50c83a Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 10 Jan 2022 15:29:45 +0800 Subject: lib/sbitmap: kill 'depth' from sbitmap_word Only the last sbitmap_word can have different depth, and all the others must have same depth of 1U << sb->shift, so not necessary to store it in sbitmap_word, and it can be retrieved easily and efficiently by adding one internal helper of __map_depth(sb, index). Remove 'depth' field from sbitmap_word, then the annotation of ____cacheline_aligned_in_smp for 'word' isn't needed any more. Not see performance effect when running high parallel IOPS test on null_blk. This way saves us one cacheline(usually 64 words) per each sbitmap_word. Cc: Martin Wilck Signed-off-by: Ming Lei Reviewed-by: Martin Wilck Reviewed-by: John Garry Link: https://lore.kernel.org/r/20220110072945.347535-1-ming.lei@redhat.com Signed-off-by: Jens Axboe --- include/linux/sbitmap.h | 17 ++++++++++------- lib/sbitmap.c | 34 ++++++++++++++-------------------- 2 files changed, 24 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 95df357ec009..df3b584b0f0c 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -27,15 +27,10 @@ struct seq_file; * struct sbitmap_word - Word in a &struct sbitmap. */ struct sbitmap_word { - /** - * @depth: Number of bits being used in @word/@cleared - */ - unsigned long depth; - /** * @word: word holding free bits */ - unsigned long word ____cacheline_aligned_in_smp; + unsigned long word; /** * @cleared: word holding cleared bits @@ -164,6 +159,14 @@ struct sbitmap_queue { int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, gfp_t flags, int node, bool round_robin, bool alloc_hint); +/* sbitmap internal helper */ +static inline unsigned int __map_depth(const struct sbitmap *sb, int index) +{ + if (index == sb->map_nr - 1) + return sb->depth - (index << sb->shift); + return 1U << sb->shift; +} + /** * sbitmap_free() - Free memory used by a &struct sbitmap. * @sb: Bitmap to free. 
@@ -251,7 +254,7 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb, while (scanned < sb->depth) { unsigned long word; unsigned int depth = min_t(unsigned int, - sb->map[index].depth - nr, + __map_depth(sb, index) - nr, sb->depth - scanned); scanned += depth; diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 09d293c30fd2..b7cb96ae4701 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -85,7 +85,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, bool alloc_hint) { unsigned int bits_per_word; - unsigned int i; if (shift < 0) shift = sbitmap_calculate_shift(depth); @@ -117,10 +116,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, return -ENOMEM; } - for (i = 0; i < sb->map_nr; i++) { - sb->map[i].depth = min(depth, bits_per_word); - depth -= sb->map[i].depth; - } return 0; } EXPORT_SYMBOL_GPL(sbitmap_init_node); @@ -135,11 +130,6 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth) sb->depth = depth; sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); - - for (i = 0; i < sb->map_nr; i++) { - sb->map[i].depth = min(depth, bits_per_word); - depth -= sb->map[i].depth; - } } EXPORT_SYMBOL_GPL(sbitmap_resize); @@ -184,8 +174,8 @@ static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index, int nr; do { - nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint, - !sb->round_robin); + nr = __sbitmap_get_word(&map->word, __map_depth(sb, index), + alloc_hint, !sb->round_robin); if (nr != -1) break; if (!sbitmap_deferred_clear(map)) @@ -257,7 +247,9 @@ static int __sbitmap_get_shallow(struct sbitmap *sb, for (i = 0; i < sb->map_nr; i++) { again: nr = __sbitmap_get_word(&sb->map[index].word, - min(sb->map[index].depth, shallow_depth), + min_t(unsigned int, + __map_depth(sb, index), + shallow_depth), SB_NR_TO_BIT(sb, alloc_hint), true); if (nr != -1) { nr += index << sb->shift; @@ -315,11 +307,12 @@ static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set) for (i = 0; i < sb->map_nr; i++) { const struct sbitmap_word *word = &sb->map[i]; + unsigned int word_depth = __map_depth(sb, i); if (set) - weight += bitmap_weight(&word->word, word->depth); + weight += bitmap_weight(&word->word, word_depth); else - weight += bitmap_weight(&word->cleared, word->depth); + weight += bitmap_weight(&word->cleared, word_depth); } return weight; } @@ -367,7 +360,7 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m) for (i = 0; i < sb->map_nr; i++) { unsigned long word = READ_ONCE(sb->map[i].word); unsigned long cleared = READ_ONCE(sb->map[i].cleared); - unsigned int word_bits = READ_ONCE(sb->map[i].depth); + unsigned int word_bits = __map_depth(sb, i); word &= ~cleared; @@ -531,15 +524,16 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, for (i = 0; i < sb->map_nr; i++) { struct sbitmap_word *map = &sb->map[index]; unsigned long get_mask; + unsigned int map_depth = __map_depth(sb, index); sbitmap_deferred_clear(map); - if (map->word == (1UL << (map->depth - 1)) - 1) + if (map->word == (1UL << (map_depth - 1)) - 1) continue; - nr = find_first_zero_bit(&map->word, map->depth); - if (nr + nr_tags <= map->depth) { + nr = find_first_zero_bit(&map->word, map_depth); + if (nr + nr_tags <= map_depth) { atomic_long_t *ptr = (atomic_long_t *) &map->word; - int map_tags = min_t(int, nr_tags, map->depth); + int map_tags = min_t(int, nr_tags, map_depth); unsigned long val, ret; get_mask = ((1UL << map_tags) - 1) << nr; -- cgit v1.2.3-71-gd317 From 3f607293b74d6acb06571a774a500143c1f0ed2c 
Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 8 Feb 2022 20:07:04 +0800 Subject: sbitmap: Delete old sbitmap_queue_get_shallow() Since __sbitmap_queue_get_shallow() was introduced in commit c05e66733788 ("sbitmap: add sbitmap_get_shallow() operation"), it has not been used. Delete __sbitmap_queue_get_shallow() and rename public __sbitmap_queue_get_shallow() -> sbitmap_queue_get_shallow() as it is odd to have public __foo but no foo at all. Signed-off-by: John Garry Link: https://lore.kernel.org/r/1644322024-105340-1-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 2 +- include/linux/sbitmap.h | 34 ++++------------------------------ lib/sbitmap.c | 6 +++--- 3 files changed, 8 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 845f74e8dd7b..0fd409b8e86e 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -107,7 +107,7 @@ static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, return BLK_MQ_NO_TAG; if (data->shallow_depth) - return __sbitmap_queue_get_shallow(bt, data->shallow_depth); + return sbitmap_queue_get_shallow(bt, data->shallow_depth); else return __sbitmap_queue_get(bt); } diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index df3b584b0f0c..dffeb8281c2d 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -135,7 +135,7 @@ struct sbitmap_queue { /** * @min_shallow_depth: The minimum shallow depth which may be passed to - * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow(). + * sbitmap_queue_get_shallow() */ unsigned int min_shallow_depth; }; @@ -463,7 +463,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, unsigned int *offset); /** - * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct + * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct * sbitmap_queue, limiting the depth used from each word, with preemption * already disabled. * @sbq: Bitmap queue to allocate from. @@ -475,8 +475,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ -int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, - unsigned int shallow_depth); +int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, + unsigned int shallow_depth); /** * sbitmap_queue_get() - Try to allocate a free bit from a &struct @@ -498,32 +498,6 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, return nr; } -/** - * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct - * sbitmap_queue, limiting the depth used from each word. - * @sbq: Bitmap queue to allocate from. - * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to - * sbitmap_queue_clear()). - * @shallow_depth: The maximum number of bits to allocate from a single word. - * See sbitmap_get_shallow(). - * - * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after - * initializing @sbq. - * - * Return: Non-negative allocated bit number if successful, -1 otherwise. - */ -static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, - unsigned int *cpu, - unsigned int shallow_depth) -{ - int nr; - - *cpu = get_cpu(); - nr = __sbitmap_queue_get_shallow(sbq, shallow_depth); - put_cpu(); - return nr; -} - /** * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the * minimum shallow depth that will be used. 
diff --git a/lib/sbitmap.c b/lib/sbitmap.c index b7cb96ae4701..2eb3de18ded3 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -557,14 +557,14 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, return 0; } -int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, - unsigned int shallow_depth) +int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, + unsigned int shallow_depth) { WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); return sbitmap_get_shallow(&sbq->sb, shallow_depth); } -EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow); +EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow); void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, unsigned int min_shallow_depth) -- cgit v1.2.3-71-gd317 From 2093057ab879da1070c851b9e07126eaa86d0dfc Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Thu, 27 Jan 2022 16:17:55 +0000 Subject: perf: Fix wrong name in comment for struct perf_cpu_context Commit 0793a61d4df8 ("performance counters: core code") added the perf subsystem (then called Performance Counters) to Linux, creating the struct perf_cpu_context. The comment for the struct referred to it as a "struct perf_counter_cpu_context". Commit cdd6c482c9ff ("perf: Do the big rename: Performance Counters -> Performance Events") changed the comment to refer to a "struct perf_event_cpu_context", which was still the wrong name for the struct. Change the comment to say "struct perf_cpu_context". CC: Thomas Gleixner CC: Ingo Molnar Signed-off-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127161759.53553-3-alexandru.elisei@arm.com --- include/linux/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 733649184b27..af97dd427501 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -864,7 +864,7 @@ struct perf_event_context { #define PERF_NR_CONTEXTS 4 /** - * struct perf_event_cpu_context - per cpu event context structure + * struct perf_cpu_context - per cpu event context structure */ struct perf_cpu_context { struct perf_event_context ctx; -- cgit v1.2.3-71-gd317 From c6c89783eba05a5e159b07cfd8c68d841cc5de42 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 28 Jan 2022 15:39:36 -0800 Subject: fscrypt: add functions for direct I/O support Encrypted files traditionally haven't supported DIO, due to the need to encrypt/decrypt the data. However, when the encryption is implemented using inline encryption (blk-crypto) instead of the traditional filesystem-layer encryption, it is straightforward to support DIO. In preparation for supporting this, add the following functions: - fscrypt_dio_supported() checks whether a DIO request is supported as far as encryption is concerned. Encrypted files will only support DIO when inline encryption is used and the I/O request is properly aligned; this function checks these preconditions. - fscrypt_limit_io_blocks() limits the length of a bio to avoid crossing a place in the file that a bio with an encryption context cannot cross due to a DUN discontiguity. This function is needed by filesystems that use the iomap DIO implementation (which operates directly on logical ranges, so it won't use fscrypt_mergeable_bio()) and that support FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32. 
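To make the intended call sites concrete, a filesystem using the iomap DIO implementation would wire these helpers up roughly as follows. This is a hypothetical sketch (the "myfs" names and the myfs_iomap_dio_read() helper are made up); the real filesystem conversions come in later patches of this series.

	static ssize_t myfs_dio_read(struct kiocb *iocb, struct iov_iter *to)
	{
		/* Fall back to buffered I/O if encryption rules out DIO. */
		if (!fscrypt_dio_supported(iocb, to))
			return -ENOTBLK;

		return myfs_iomap_dio_read(iocb, to);	/* hypothetical helper */
	}

	/*
	 * And in the iomap ->iomap_begin callback, clamp the mapped range so
	 * that no bio crosses an IV_INO_LBLK_32 DUN wraparound:
	 */
	nr_blocks = fscrypt_limit_io_blocks(inode, first_lblk, nr_blocks);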
Co-developed-by: Satya Tangirala Signed-off-by: Satya Tangirala Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220128233940.79464-2-ebiggers@kernel.org Signed-off-by: Eric Biggers --- fs/crypto/crypto.c | 8 +++++ fs/crypto/inline_crypt.c | 93 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/fscrypt.h | 18 ++++++++++ 3 files changed, 119 insertions(+) (limited to 'include/linux') diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 4ef3f714046a..4fcca79f39ae 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -69,6 +69,14 @@ void fscrypt_free_bounce_page(struct page *bounce_page) } EXPORT_SYMBOL(fscrypt_free_bounce_page); +/* + * Generate the IV for the given logical block number within the given file. + * For filenames encryption, lblk_num == 0. + * + * Keep this in sync with fscrypt_limit_io_blocks(). fscrypt_limit_io_blocks() + * needs to know about any IV generation methods where the low bits of IV don't + * simply contain the lblk_num (e.g., IV_INO_LBLK_32). + */ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci) { diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index c57bebfa48fe..93c2ca858092 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "fscrypt_private.h" @@ -315,6 +316,10 @@ EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh); * * fscrypt_set_bio_crypt_ctx() must have already been called on the bio. * + * This function isn't required in cases where crypto-mergeability is ensured in + * another way, such as I/O targeting only a single file (and thus a single key) + * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity. + * * Return: true iff the I/O is mergeable */ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, @@ -363,3 +368,91 @@ bool fscrypt_mergeable_bio_bh(struct bio *bio, return fscrypt_mergeable_bio(bio, inode, next_lblk); } EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); + +/** + * fscrypt_dio_supported() - check whether a DIO (direct I/O) request is + * supported as far as encryption is concerned + * @iocb: the file and position the I/O is targeting + * @iter: the I/O data segment(s) + * + * Return: %true if there are no encryption constraints that prevent DIO from + * being supported; %false if DIO is unsupported. (Note that in the + * %true case, the filesystem might have other, non-encryption-related + * constraints that prevent DIO from actually being supported.) + */ +bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter) +{ + const struct inode *inode = file_inode(iocb->ki_filp); + const unsigned int blocksize = i_blocksize(inode); + + /* If the file is unencrypted, no veto from us. */ + if (!fscrypt_needs_contents_encryption(inode)) + return true; + + /* We only support DIO with inline crypto, not fs-layer crypto. */ + if (!fscrypt_inode_uses_inline_crypto(inode)) + return false; + + /* + * Since the granularity of encryption is filesystem blocks, the file + * position and total I/O length must be aligned to the filesystem block + * size -- not just to the block device's logical block size as is + * traditionally the case for DIO on many filesystems. + * + * We require that the user-provided memory buffers be filesystem block + * aligned too. It is simpler to have a single alignment value required + * for all properties of the I/O, as is normally the case for DIO. 
+ * Also, allowing less aligned buffers would imply that data units could + * cross bvecs, which would greatly complicate the I/O stack, which + * assumes that bios can be split at any bvec boundary. + */ + if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize)) + return false; + + return true; +} +EXPORT_SYMBOL_GPL(fscrypt_dio_supported); + +/** + * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs + * @inode: the file on which I/O is being done + * @lblk: the block at which the I/O is being started from + * @nr_blocks: the number of blocks we want to submit starting at @lblk + * + * Determine the limit to the number of blocks that can be submitted in a bio + * targeting @lblk without causing a data unit number (DUN) discontiguity. + * + * This is normally just @nr_blocks, as normally the DUNs just increment along + * with the logical blocks. (Or the file is not encrypted.) + * + * In rare cases, fscrypt can be using an IV generation method that allows the + * DUN to wrap around within logically contiguous blocks, and that wraparound + * will occur. If this happens, a value less than @nr_blocks will be returned + * so that the wraparound doesn't occur in the middle of a bio, which would + * cause encryption/decryption to produce wrong results. + * + * Return: the actual number of blocks that can be submitted + */ +u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks) +{ + const struct fscrypt_info *ci; + u32 dun; + + if (!fscrypt_inode_uses_inline_crypto(inode)) + return nr_blocks; + + if (nr_blocks <= 1) + return nr_blocks; + + ci = inode->i_crypt_info; + if (!(fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) + return nr_blocks; + + /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */ + + dun = ci->ci_hashed_ino + lblk; + + return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun); +} +EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 91ea9477e9bd..50d92d805bd8 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -714,6 +714,10 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, bool fscrypt_mergeable_bio_bh(struct bio *bio, const struct buffer_head *next_bh); +bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter); + +u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks); + #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) @@ -742,6 +746,20 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, { return true; } + +static inline bool fscrypt_dio_supported(struct kiocb *iocb, + struct iov_iter *iter) +{ + const struct inode *inode = file_inode(iocb->ki_filp); + + return !fscrypt_needs_contents_encryption(inode); +} + +static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, + u64 nr_blocks) +{ + return nr_blocks; +} #endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ /** -- cgit v1.2.3-71-gd317 From b2309a71c1f2fc841feb184195b2e46b2e139bf4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 7 Feb 2022 10:41:07 -0800 Subject: net: add dev->dev_registered_tracker Convert one dev_hold()/dev_put() pair in register_netdevice() and unregister_netdevice_many() to dev_hold_track() and dev_put_track(). This would allow to detect a rogue dev_put() a bit earlier. 
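For context, the tracked helpers follow the pattern sketched below (generic illustration, not from this patch). The tracker cookie must be stored somewhere that lives as long as the reference itself, which is why this patch adds a field to struct net_device rather than using a stack variable as shown here.

	netdevice_tracker tracker;

	dev_hold_track(dev, &tracker, GFP_KERNEL);	/* take a tracked reference */
	/* ... use dev ... */
	dev_put_track(dev, &tracker);		/* release via the same tracker */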
Signed-off-by: Eric Dumazet Link: https://lore.kernel.org/r/20220207184107.1401096-1-eric.dumazet@gmail.com Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 3 +++ net/core/dev.c | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3fb6fb67ed77..5f6e2c0b0c90 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1948,6 +1948,8 @@ enum netdev_ml_priv_type { * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. * @linkwatch_dev_tracker: refcount tracker used by linkwatch. * @watchdog_dev_tracker: refcount tracker used by watchdog. + * @dev_registered_tracker: tracker for reference held while + * registered * * FIXME: cleanup struct net_device such that network protocol info * moves out. @@ -2282,6 +2284,7 @@ struct net_device { u8 dev_addr_shadow[MAX_ADDR_LEN]; netdevice_tracker linkwatch_dev_tracker; netdevice_tracker watchdog_dev_tracker; + netdevice_tracker dev_registered_tracker; }; #define to_net_dev(d) container_of(d, struct net_device, dev) diff --git a/net/core/dev.c b/net/core/dev.c index f662c6a7d7b4..66556a21800a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -9683,8 +9683,10 @@ int register_netdevice(struct net_device *dev) linkwatch_init_dev(dev); dev_init_scheduler(dev); - dev_hold(dev); + + dev_hold_track(dev, &dev->dev_registered_tracker, GFP_KERNEL); list_netdevice(dev); + add_device_randomness(dev->dev_addr, dev->addr_len); /* If the device has permanent device address, driver should @@ -10449,7 +10451,7 @@ void unregister_netdevice_many(struct list_head *head) synchronize_net(); list_for_each_entry(dev, head, unreg_list) { - dev_put(dev); + dev_put_track(dev, &dev->dev_registered_tracker); net_set_todo(dev); } -- cgit v1.2.3-71-gd317 From 6523d3b2ffa238ac033c34a726617061d6a744aa Mon Sep 17 00:00:00 2001 From: Iwona Winiarska Date: Tue, 8 Feb 2022 16:36:30 +0100 Subject: peci: Add core infrastructure Intel processors provide access for various services designed to support processor and DRAM thermal management, platform manageability and processor interface tuning and diagnostics. Those services are available via the Platform Environment Control Interface (PECI) that provides a communication channel between the processor and the Baseboard Management Controller (BMC) or other platform management device. This change introduces PECI subsystem by adding the initial core module and API for controller drivers. 
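A minimal controller driver built on this API could look roughly like the sketch below. The "foo" names and the platform-bus glue are assumptions for illustration; only struct peci_controller_ops and devm_peci_controller_add() come from this patch.

	static int foo_xfer(struct peci_controller *controller, u8 addr,
			    struct peci_request *req)
	{
		/* Drive the hardware: transmit req->tx.buf/req->tx.len to
		 * @addr and fill req->rx.buf/req->rx.len with the reply. */
		return 0;
	}

	static struct peci_controller_ops foo_ops = {
		.xfer = foo_xfer,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct peci_controller *controller;

		controller = devm_peci_controller_add(&pdev->dev, &foo_ops);

		return PTR_ERR_OR_ZERO(controller);
	}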
Co-developed-by: Jason M Bills Co-developed-by: Jae Hyun Yoo Reviewed-by: Pierre-Louis Bossart Acked-by: Joel Stanley Signed-off-by: Jason M Bills Signed-off-by: Jae Hyun Yoo Signed-off-by: Iwona Winiarska Link: https://lore.kernel.org/r/20220208153639.255278-5-iwona.winiarska@intel.com Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 8 +++ drivers/Kconfig | 3 + drivers/Makefile | 1 + drivers/peci/Kconfig | 15 +++++ drivers/peci/Makefile | 5 ++ drivers/peci/core.c | 158 ++++++++++++++++++++++++++++++++++++++++++++++++ drivers/peci/internal.h | 16 +++++ include/linux/peci.h | 99 ++++++++++++++++++++++++++++++ 8 files changed, 305 insertions(+) create mode 100644 drivers/peci/Kconfig create mode 100644 drivers/peci/Makefile create mode 100644 drivers/peci/core.c create mode 100644 drivers/peci/internal.h create mode 100644 include/linux/peci.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index f41088418aae..0fd6c9f40406 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15096,6 +15096,14 @@ L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/peaq-wmi.c +PECI SUBSYSTEM +M: Iwona Winiarska +L: openbmc@lists.ozlabs.org (moderated for non-subscribers) +S: Supported +F: Documentation/devicetree/bindings/peci/ +F: drivers/peci/ +F: include/linux/peci.h + PENSANDO ETHERNET DRIVERS M: Shannon Nelson M: drivers@pensando.io diff --git a/drivers/Kconfig b/drivers/Kconfig index 0d399ddaa185..8d6cd5d08722 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -236,4 +236,7 @@ source "drivers/interconnect/Kconfig" source "drivers/counter/Kconfig" source "drivers/most/Kconfig" + +source "drivers/peci/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index a110338c860c..020780b6b4d2 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -187,3 +187,4 @@ obj-$(CONFIG_GNSS) += gnss/ obj-$(CONFIG_INTERCONNECT) += interconnect/ obj-$(CONFIG_COUNTER) += counter/ obj-$(CONFIG_MOST) += most/ +obj-$(CONFIG_PECI) += peci/ diff --git a/drivers/peci/Kconfig b/drivers/peci/Kconfig new file mode 100644 index 000000000000..71a4ad81225a --- /dev/null +++ b/drivers/peci/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only + +menuconfig PECI + tristate "PECI support" + help + The Platform Environment Control Interface (PECI) is an interface + that provides a communication channel to Intel processors and + chipset components from external monitoring or control devices. + + If you are building a Baseboard Management Controller (BMC) kernel + for Intel platform say Y here and also to the specific driver for + your adapter(s) below. If unsure say N. + + This support is also available as a module. If so, the module + will be called peci. 
diff --git a/drivers/peci/Makefile b/drivers/peci/Makefile new file mode 100644 index 000000000000..e789a354e842 --- /dev/null +++ b/drivers/peci/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# Core functionality +peci-y := core.o +obj-$(CONFIG_PECI) += peci.o diff --git a/drivers/peci/core.c b/drivers/peci/core.c new file mode 100644 index 000000000000..73ad0a47fa9d --- /dev/null +++ b/drivers/peci/core.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2018-2021 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +static DEFINE_IDA(peci_controller_ida); + +static void peci_controller_dev_release(struct device *dev) +{ + struct peci_controller *controller = to_peci_controller(dev); + + mutex_destroy(&controller->bus_lock); + ida_free(&peci_controller_ida, controller->id); + kfree(controller); +} + +struct device_type peci_controller_type = { + .release = peci_controller_dev_release, +}; + +static struct peci_controller *peci_controller_alloc(struct device *dev, + struct peci_controller_ops *ops) +{ + struct peci_controller *controller; + int ret; + + if (!ops->xfer) + return ERR_PTR(-EINVAL); + + controller = kzalloc(sizeof(*controller), GFP_KERNEL); + if (!controller) + return ERR_PTR(-ENOMEM); + + ret = ida_alloc_max(&peci_controller_ida, U8_MAX, GFP_KERNEL); + if (ret < 0) + goto err; + controller->id = ret; + + controller->ops = ops; + + controller->dev.parent = dev; + controller->dev.bus = &peci_bus_type; + controller->dev.type = &peci_controller_type; + + device_initialize(&controller->dev); + + mutex_init(&controller->bus_lock); + + return controller; + +err: + kfree(controller); + return ERR_PTR(ret); +} + +static void unregister_controller(void *_controller) +{ + struct peci_controller *controller = _controller; + + device_unregister(&controller->dev); + + fwnode_handle_put(controller->dev.fwnode); + + pm_runtime_disable(&controller->dev); +} + +/** + * devm_peci_controller_add() - add PECI controller + * @dev: device for devm operations + * @ops: pointer to controller specific methods + * + * In final stage of its probe(), peci_controller driver calls + * devm_peci_controller_add() to register itself with the PECI bus. + * + * Return: Pointer to the newly allocated controller or ERR_PTR() in case of failure. 
+ */ +struct peci_controller *devm_peci_controller_add(struct device *dev, + struct peci_controller_ops *ops) +{ + struct peci_controller *controller; + int ret; + + controller = peci_controller_alloc(dev, ops); + if (IS_ERR(controller)) + return controller; + + ret = dev_set_name(&controller->dev, "peci-%d", controller->id); + if (ret) + goto err_put; + + pm_runtime_no_callbacks(&controller->dev); + pm_suspend_ignore_children(&controller->dev, true); + pm_runtime_enable(&controller->dev); + + device_set_node(&controller->dev, fwnode_handle_get(dev_fwnode(dev))); + + ret = device_add(&controller->dev); + if (ret) + goto err_fwnode; + + ret = devm_add_action_or_reset(dev, unregister_controller, controller); + if (ret) + return ERR_PTR(ret); + + return controller; + +err_fwnode: + fwnode_handle_put(controller->dev.fwnode); + + pm_runtime_disable(&controller->dev); + +err_put: + put_device(&controller->dev); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_NS_GPL(devm_peci_controller_add, PECI); + +struct bus_type peci_bus_type = { + .name = "peci", +}; + +static int __init peci_init(void) +{ + int ret; + + ret = bus_register(&peci_bus_type); + if (ret < 0) { + pr_err("peci: failed to register PECI bus type!\n"); + return ret; + } + + return 0; +} +module_init(peci_init); + +static void __exit peci_exit(void) +{ + bus_unregister(&peci_bus_type); +} +module_exit(peci_exit); + +MODULE_AUTHOR("Jason M Bills "); +MODULE_AUTHOR("Jae Hyun Yoo "); +MODULE_AUTHOR("Iwona Winiarska "); +MODULE_DESCRIPTION("PECI bus core module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/peci/internal.h b/drivers/peci/internal.h new file mode 100644 index 000000000000..918dea745a86 --- /dev/null +++ b/drivers/peci/internal.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2021 Intel Corporation */ + +#ifndef __PECI_INTERNAL_H +#define __PECI_INTERNAL_H + +#include +#include + +struct peci_controller; + +extern struct bus_type peci_bus_type; + +extern struct device_type peci_controller_type; + +#endif /* __PECI_INTERNAL_H */ diff --git a/include/linux/peci.h b/include/linux/peci.h new file mode 100644 index 000000000000..26e0a4e73b50 --- /dev/null +++ b/include/linux/peci.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2021 Intel Corporation */ + +#ifndef __LINUX_PECI_H +#define __LINUX_PECI_H + +#include +#include +#include +#include + +/* + * Currently we don't support any PECI command over 32 bytes. + */ +#define PECI_REQUEST_MAX_BUF_SIZE 32 + +struct peci_controller; +struct peci_request; + +/** + * struct peci_controller_ops - PECI controller specific methods + * @xfer: PECI transfer function + * + * PECI controllers may have different hardware interfaces - the drivers + * implementing PECI controllers can use this structure to abstract away those + * differences by exposing a common interface for PECI core. + */ +struct peci_controller_ops { + int (*xfer)(struct peci_controller *controller, u8 addr, struct peci_request *req); +}; + +/** + * struct peci_controller - PECI controller + * @dev: device object to register PECI controller to the device model + * @ops: pointer to device specific controller operations + * @bus_lock: lock used to protect multiple callers + * @id: PECI controller ID + * + * PECI controllers usually connect to their drivers using non-PECI bus, + * such as the platform bus. + * Each PECI controller can communicate with one or more PECI devices. 
+ */ +struct peci_controller { + struct device dev; + struct peci_controller_ops *ops; + struct mutex bus_lock; /* held for the duration of xfer */ + u8 id; +}; + +struct peci_controller *devm_peci_controller_add(struct device *parent, + struct peci_controller_ops *ops); + +static inline struct peci_controller *to_peci_controller(void *d) +{ + return container_of(d, struct peci_controller, dev); +} + +/** + * struct peci_device - PECI device + * @dev: device object to register PECI device to the device model + * @controller: manages the bus segment hosting this PECI device + * @addr: address used on the PECI bus connected to the parent controller + * + * A peci_device identifies a single device (i.e. CPU) connected to a PECI bus. + * The behaviour exposed to the rest of the system is defined by the PECI driver + * managing the device. + */ +struct peci_device { + struct device dev; + u8 addr; +}; + +static inline struct peci_device *to_peci_device(struct device *d) +{ + return container_of(d, struct peci_device, dev); +} + +/** + * struct peci_request - PECI request + * @device: PECI device to which the request is sent + * @tx: TX buffer specific data + * @tx.buf: TX buffer + * @tx.len: transfer data length in bytes + * @rx: RX buffer specific data + * @rx.buf: RX buffer + * @rx.len: received data length in bytes + * + * A peci_request represents a request issued by PECI originator (TX) and + * a response received from PECI responder (RX). + */ +struct peci_request { + struct peci_device *device; + struct { + u8 buf[PECI_REQUEST_MAX_BUF_SIZE]; + u8 len; + } rx, tx; +}; + +#endif /* __LINUX_PECI_H */ -- cgit v1.2.3-71-gd317 From 52857e6828e260b16ac569578705f83cf2a71ac1 Mon Sep 17 00:00:00 2001 From: Iwona Winiarska Date: Tue, 8 Feb 2022 16:36:32 +0100 Subject: peci: Add device detection Since PECI devices are discoverable, we can dynamically detect devices that are actually available in the system. This change complements the earlier implementation by rescanning PECI bus to detect available devices. For this purpose, it also introduces the minimal API for PECI requests. 
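The request API introduced here is intentionally small: a caller allocates a fixed-size request, runs it through the controller, and frees it. A rough usage sketch (tx_len/rx_len stand in for a real command's lengths; the locking mirrors peci_detect() in this patch):

	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, tx_len, rx_len);
	if (!req)
		return -ENOMEM;

	/* ... fill req->tx.buf with the command ... */

	mutex_lock(&controller->bus_lock);
	ret = controller->ops->xfer(controller, device->addr, req);
	mutex_unlock(&controller->bus_lock);

	/* ... on success the reply is in req->rx.buf ... */
	peci_request_free(req);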
Reviewed-by: Pierre-Louis Bossart Acked-by: Joel Stanley Signed-off-by: Iwona Winiarska Link: https://lore.kernel.org/r/20220208153639.255278-7-iwona.winiarska@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/peci/Makefile | 2 +- drivers/peci/core.c | 33 +++++++++++++ drivers/peci/device.c | 120 ++++++++++++++++++++++++++++++++++++++++++++++++ drivers/peci/internal.h | 14 ++++++ drivers/peci/request.c | 55 ++++++++++++++++++++++ include/linux/peci.h | 2 + 6 files changed, 225 insertions(+), 1 deletion(-) create mode 100644 drivers/peci/device.c create mode 100644 drivers/peci/request.c (limited to 'include/linux') diff --git a/drivers/peci/Makefile b/drivers/peci/Makefile index 926d8df15cbd..c5f9d3fe21bb 100644 --- a/drivers/peci/Makefile +++ b/drivers/peci/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only # Core functionality -peci-y := core.o +peci-y := core.o request.o device.o obj-$(CONFIG_PECI) += peci.o # Hardware specific bus drivers diff --git a/drivers/peci/core.c b/drivers/peci/core.c index 73ad0a47fa9d..c3361e6e043a 100644 --- a/drivers/peci/core.c +++ b/drivers/peci/core.c @@ -29,6 +29,20 @@ struct device_type peci_controller_type = { .release = peci_controller_dev_release, }; +static int peci_controller_scan_devices(struct peci_controller *controller) +{ + int ret; + u8 addr; + + for (addr = PECI_BASE_ADDR; addr < PECI_BASE_ADDR + PECI_DEVICE_NUM_MAX; addr++) { + ret = peci_device_create(controller, addr); + if (ret) + return ret; + } + + return 0; +} + static struct peci_controller *peci_controller_alloc(struct device *dev, struct peci_controller_ops *ops) { @@ -64,10 +78,23 @@ err: return ERR_PTR(ret); } +static int unregister_child(struct device *dev, void *dummy) +{ + peci_device_destroy(to_peci_device(dev)); + + return 0; +} + static void unregister_controller(void *_controller) { struct peci_controller *controller = _controller; + /* + * Detach any active PECI devices. This can't fail, thus we do not + * check the returned value. + */ + device_for_each_child_reverse(&controller->dev, NULL, unregister_child); + device_unregister(&controller->dev); fwnode_handle_put(controller->dev.fwnode); @@ -113,6 +140,12 @@ struct peci_controller *devm_peci_controller_add(struct device *dev, if (ret) return ERR_PTR(ret); + /* + * Ignoring retval since failures during scan are non-critical for + * controller itself. + */ + peci_controller_scan_devices(controller); + return controller; err_fwnode: diff --git a/drivers/peci/device.c b/drivers/peci/device.c new file mode 100644 index 000000000000..2b3a2d893aaf --- /dev/null +++ b/drivers/peci/device.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2018-2021 Intel Corporation + +#include +#include + +#include "internal.h" + +/* + * PECI device can be removed using sysfs, but the removal can also happen as + * a result of controller being removed. + * Mutex is used to protect PECI device from being double-deleted. + */ +static DEFINE_MUTEX(peci_device_del_lock); + +static int peci_detect(struct peci_controller *controller, u8 addr) +{ + /* + * PECI Ping is a command encoded by tx_len = 0, rx_len = 0. + * We expect correct Write FCS if the device at the target address + * is able to respond. 
+ */ + struct peci_request req = { 0 }; + int ret; + + mutex_lock(&controller->bus_lock); + ret = controller->ops->xfer(controller, addr, &req); + mutex_unlock(&controller->bus_lock); + + return ret; +} + +static bool peci_addr_valid(u8 addr) +{ + return addr >= PECI_BASE_ADDR && addr < PECI_BASE_ADDR + PECI_DEVICE_NUM_MAX; +} + +static int peci_dev_exists(struct device *dev, void *data) +{ + struct peci_device *device = to_peci_device(dev); + u8 *addr = data; + + if (device->addr == *addr) + return -EBUSY; + + return 0; +} + +int peci_device_create(struct peci_controller *controller, u8 addr) +{ + struct peci_device *device; + int ret; + + if (!peci_addr_valid(addr)) + return -EINVAL; + + /* Check if we have already detected this device before. */ + ret = device_for_each_child(&controller->dev, &addr, peci_dev_exists); + if (ret) + return 0; + + ret = peci_detect(controller, addr); + if (ret) { + /* + * Device not present or host state doesn't allow successful + * detection at this time. + */ + if (ret == -EIO || ret == -ETIMEDOUT) + return 0; + + return ret; + } + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return -ENOMEM; + + device_initialize(&device->dev); + + device->addr = addr; + device->dev.parent = &controller->dev; + device->dev.bus = &peci_bus_type; + device->dev.type = &peci_device_type; + + ret = dev_set_name(&device->dev, "%d-%02x", controller->id, device->addr); + if (ret) + goto err_put; + + ret = device_add(&device->dev); + if (ret) + goto err_put; + + return 0; + +err_put: + put_device(&device->dev); + + return ret; +} + +void peci_device_destroy(struct peci_device *device) +{ + mutex_lock(&peci_device_del_lock); + if (!device->deleted) { + device_unregister(&device->dev); + device->deleted = true; + } + mutex_unlock(&peci_device_del_lock); +} + +static void peci_device_release(struct device *dev) +{ + struct peci_device *device = to_peci_device(dev); + + kfree(device); +} + +struct device_type peci_device_type = { + .release = peci_device_release, +}; diff --git a/drivers/peci/internal.h b/drivers/peci/internal.h index 918dea745a86..57d11a902c5d 100644 --- a/drivers/peci/internal.h +++ b/drivers/peci/internal.h @@ -8,6 +8,20 @@ #include struct peci_controller; +struct peci_device; +struct peci_request; + +/* PECI CPU address range 0x30-0x37 */ +#define PECI_BASE_ADDR 0x30 +#define PECI_DEVICE_NUM_MAX 8 + +struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len); +void peci_request_free(struct peci_request *req); + +extern struct device_type peci_device_type; + +int peci_device_create(struct peci_controller *controller, u8 addr); +void peci_device_destroy(struct peci_device *device); extern struct bus_type peci_bus_type; diff --git a/drivers/peci/request.c b/drivers/peci/request.c new file mode 100644 index 000000000000..7dee51c50dd2 --- /dev/null +++ b/drivers/peci/request.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2021 Intel Corporation + +#include +#include +#include +#include + +#include "internal.h" + +/** + * peci_request_alloc() - allocate &struct peci_requests + * @device: PECI device to which request is going to be sent + * @tx_len: TX length + * @rx_len: RX length + * + * Return: A pointer to a newly allocated &struct peci_request on success or NULL otherwise. 
+ */ +struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len) +{ + struct peci_request *req; + + /* + * TX and RX buffers are fixed length members of peci_request, this is + * just a warn for developers to make sure to expand the buffers (or + * change the allocation method) if we go over the current limit. + */ + if (WARN_ON_ONCE(tx_len > PECI_REQUEST_MAX_BUF_SIZE || rx_len > PECI_REQUEST_MAX_BUF_SIZE)) + return NULL; + /* + * PECI controllers that we are using now don't support DMA, this + * should be converted to DMA API once support for controllers that do + * allow it is added to avoid an extra copy. + */ + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + req->device = device; + req->tx.len = tx_len; + req->rx.len = rx_len; + + return req; +} +EXPORT_SYMBOL_NS_GPL(peci_request_alloc, PECI); + +/** + * peci_request_free() - free peci_request + * @req: the PECI request to be freed + */ +void peci_request_free(struct peci_request *req) +{ + kfree(req); +} +EXPORT_SYMBOL_NS_GPL(peci_request_free, PECI); diff --git a/include/linux/peci.h b/include/linux/peci.h index 26e0a4e73b50..7e35673f3786 100644 --- a/include/linux/peci.h +++ b/include/linux/peci.h @@ -60,6 +60,7 @@ static inline struct peci_controller *to_peci_controller(void *d) * @dev: device object to register PECI device to the device model * @controller: manages the bus segment hosting this PECI device * @addr: address used on the PECI bus connected to the parent controller + * @deleted: indicates that PECI device was already deleted * * A peci_device identifies a single device (i.e. CPU) connected to a PECI bus. * The behaviour exposed to the rest of the system is defined by the PECI driver @@ -68,6 +69,7 @@ static inline struct peci_controller *to_peci_controller(void *d) struct peci_device { struct device dev; u8 addr; + bool deleted; }; static inline struct peci_device *to_peci_device(struct device *d) -- cgit v1.2.3-71-gd317 From 6b8145b054b27319dddaad4abbb5184e343375da Mon Sep 17 00:00:00 2001 From: Iwona Winiarska Date: Tue, 8 Feb 2022 16:36:34 +0100 Subject: peci: Add support for PECI device drivers Add support for PECI device drivers, which unlike PECI controller drivers are actually able to provide functionalities to userspace. Also, extend peci_request API to allow querying more details about PECI device (e.g. model/family), that's going to be used to find a compatible peci_driver. 
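For illustration, a minimal driver built on this interface might look like the sketch below; the driver name, match values and probe body are hypothetical, and the sketch assumes it lives under drivers/peci/ so that it can include the internal.h header which declares struct peci_driver and module_peci_driver():

#include <linux/module.h>
#include <linux/peci.h>

#include "internal.h"

static int dummy_probe(struct peci_device *device, const struct peci_device_id *id)
{
	/* The device was matched on family/model; addr identifies the socket. */
	dev_info(&device->dev, "bound PECI device at %#02x\n", device->addr);

	return 0;
}

static const struct peci_device_id dummy_ids[] = {
	{ .family = 6, .model = 0x6a },	/* hypothetical family/model */
	{ }
};

static struct peci_driver dummy_driver = {
	.probe = dummy_probe,
	.id_table = dummy_ids,
	.driver = {
		.name = "peci-dummy",
	},
};
module_peci_driver(dummy_driver);

MODULE_DESCRIPTION("Hypothetical PECI driver skeleton");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI);

Both .probe and .id_table are mandatory here; __peci_driver_register() rejects a driver that lacks either.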
Reviewed-by: Pierre-Louis Bossart Acked-by: Joel Stanley Signed-off-by: Iwona Winiarska Link: https://lore.kernel.org/r/20220208153639.255278-9-iwona.winiarska@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/peci/core.c | 44 ++++++++++ drivers/peci/device.c | 130 +++++++++++++++++++++++++++++ drivers/peci/internal.h | 74 +++++++++++++++++ drivers/peci/request.c | 214 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/peci.h | 19 +++++ 5 files changed, 481 insertions(+) (limited to 'include/linux') diff --git a/drivers/peci/core.c b/drivers/peci/core.c index e993615cf521..9c8cf07e51c7 100644 --- a/drivers/peci/core.c +++ b/drivers/peci/core.c @@ -160,8 +160,52 @@ err_put: } EXPORT_SYMBOL_NS_GPL(devm_peci_controller_add, PECI); +static const struct peci_device_id * +peci_bus_match_device_id(const struct peci_device_id *id, struct peci_device *device) +{ + while (id->family != 0) { + if (id->family == device->info.family && + id->model == device->info.model) + return id; + id++; + } + + return NULL; +} + +static int peci_bus_device_match(struct device *dev, struct device_driver *drv) +{ + struct peci_device *device = to_peci_device(dev); + struct peci_driver *peci_drv = to_peci_driver(drv); + + if (dev->type != &peci_device_type) + return 0; + + return !!peci_bus_match_device_id(peci_drv->id_table, device); +} + +static int peci_bus_device_probe(struct device *dev) +{ + struct peci_device *device = to_peci_device(dev); + struct peci_driver *driver = to_peci_driver(dev->driver); + + return driver->probe(device, peci_bus_match_device_id(driver->id_table, device)); +} + +static void peci_bus_device_remove(struct device *dev) +{ + struct peci_device *device = to_peci_device(dev); + struct peci_driver *driver = to_peci_driver(dev->driver); + + if (driver->remove) + driver->remove(device); +} + struct bus_type peci_bus_type = { .name = "peci", + .match = peci_bus_device_match, + .probe = peci_bus_device_probe, + .remove = peci_bus_device_remove, .bus_groups = peci_bus_groups, }; diff --git a/drivers/peci/device.c b/drivers/peci/device.c index d10ed1cfcd48..184b5e650b0b 100644 --- a/drivers/peci/device.c +++ b/drivers/peci/device.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2018-2021 Intel Corporation +#include #include #include @@ -13,6 +14,104 @@ */ static DEFINE_MUTEX(peci_device_del_lock); +#define REVISION_NUM_MASK GENMASK(15, 8) +static int peci_get_revision(struct peci_device *device, u8 *revision) +{ + struct peci_request *req; + u64 dib; + + req = peci_xfer_get_dib(device); + if (IS_ERR(req)) + return PTR_ERR(req); + + /* + * PECI device may be in a state where it is unable to return a proper + * DIB, in which case it returns 0 as DIB value. + * Let's treat this as an error to avoid carrying on with the detection + * using invalid revision. 
+ */ + dib = peci_request_dib_read(req); + if (dib == 0) { + peci_request_free(req); + return -EIO; + } + + *revision = FIELD_GET(REVISION_NUM_MASK, dib); + + peci_request_free(req); + + return 0; +} + +static int peci_get_cpu_id(struct peci_device *device, u32 *cpu_id) +{ + struct peci_request *req; + int ret; + + req = peci_xfer_pkg_cfg_readl(device, PECI_PCS_PKG_ID, PECI_PKG_ID_CPU_ID); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = peci_request_status(req); + if (ret) + goto out_req_free; + + *cpu_id = peci_request_data_readl(req); +out_req_free: + peci_request_free(req); + + return ret; +} + +static unsigned int peci_x86_cpu_family(unsigned int sig) +{ + unsigned int x86; + + x86 = (sig >> 8) & 0xf; + + if (x86 == 0xf) + x86 += (sig >> 20) & 0xff; + + return x86; +} + +static unsigned int peci_x86_cpu_model(unsigned int sig) +{ + unsigned int fam, model; + + fam = peci_x86_cpu_family(sig); + + model = (sig >> 4) & 0xf; + + if (fam >= 0x6) + model += ((sig >> 16) & 0xf) << 4; + + return model; +} + +static int peci_device_info_init(struct peci_device *device) +{ + u8 revision; + u32 cpu_id; + int ret; + + ret = peci_get_cpu_id(device, &cpu_id); + if (ret) + return ret; + + device->info.family = peci_x86_cpu_family(cpu_id); + device->info.model = peci_x86_cpu_model(cpu_id); + + ret = peci_get_revision(device, &revision); + if (ret) + return ret; + device->info.peci_revision = revision; + + device->info.socket_id = device->addr - PECI_BASE_ADDR; + + return 0; +} + static int peci_detect(struct peci_controller *controller, u8 addr) { /* @@ -82,6 +181,10 @@ int peci_device_create(struct peci_controller *controller, u8 addr) device->dev.bus = &peci_bus_type; device->dev.type = &peci_device_type; + ret = peci_device_info_init(device); + if (ret) + goto err_put; + ret = dev_set_name(&device->dev, "%d-%02x", controller->id, device->addr); if (ret) goto err_put; @@ -108,6 +211,33 @@ void peci_device_destroy(struct peci_device *device) mutex_unlock(&peci_device_del_lock); } +int __peci_driver_register(struct peci_driver *driver, struct module *owner, + const char *mod_name) +{ + driver->driver.bus = &peci_bus_type; + driver->driver.owner = owner; + driver->driver.mod_name = mod_name; + + if (!driver->probe) { + pr_err("peci: trying to register driver without probe callback\n"); + return -EINVAL; + } + + if (!driver->id_table) { + pr_err("peci: trying to register driver without device id table\n"); + return -EINVAL; + } + + return driver_register(&driver->driver); +} +EXPORT_SYMBOL_NS_GPL(__peci_driver_register, PECI); + +void peci_driver_unregister(struct peci_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_NS_GPL(peci_driver_unregister, PECI); + static void peci_device_release(struct device *dev) { struct peci_device *device = to_peci_device(dev); diff --git a/drivers/peci/internal.h b/drivers/peci/internal.h index 978e12c8e1d3..52c02e12874f 100644 --- a/drivers/peci/internal.h +++ b/drivers/peci/internal.h @@ -19,6 +19,35 @@ struct peci_request; struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len); void peci_request_free(struct peci_request *req); +int peci_request_status(struct peci_request *req); + +u64 peci_request_dib_read(struct peci_request *req); + +u8 peci_request_data_readb(struct peci_request *req); +u16 peci_request_data_readw(struct peci_request *req); +u32 peci_request_data_readl(struct peci_request *req); +u64 peci_request_data_readq(struct peci_request *req); + +struct peci_request *peci_xfer_get_dib(struct 
peci_device *device); +struct peci_request *peci_xfer_get_temp(struct peci_device *device); + +struct peci_request *peci_xfer_pkg_cfg_readb(struct peci_device *device, u8 index, u16 param); +struct peci_request *peci_xfer_pkg_cfg_readw(struct peci_device *device, u8 index, u16 param); +struct peci_request *peci_xfer_pkg_cfg_readl(struct peci_device *device, u8 index, u16 param); +struct peci_request *peci_xfer_pkg_cfg_readq(struct peci_device *device, u8 index, u16 param); + +/** + * struct peci_device_id - PECI device data to match + * @data: pointer to driver private data specific to device + * @family: device family + * @model: device model + */ +struct peci_device_id { + const void *data; + u16 family; + u8 model; +}; + extern struct device_type peci_device_type; extern const struct attribute_group *peci_device_groups[]; @@ -28,6 +57,51 @@ void peci_device_destroy(struct peci_device *device); extern struct bus_type peci_bus_type; extern const struct attribute_group *peci_bus_groups[]; +/** + * struct peci_driver - PECI driver + * @driver: inherit device driver + * @probe: probe callback + * @remove: remove callback + * @id_table: PECI device match table to decide which device to bind + */ +struct peci_driver { + struct device_driver driver; + int (*probe)(struct peci_device *device, const struct peci_device_id *id); + void (*remove)(struct peci_device *device); + const struct peci_device_id *id_table; +}; + +static inline struct peci_driver *to_peci_driver(struct device_driver *d) +{ + return container_of(d, struct peci_driver, driver); +} + +int __peci_driver_register(struct peci_driver *driver, struct module *owner, + const char *mod_name); +/** + * peci_driver_register() - register PECI driver + * @driver: the driver to be registered + * + * PECI drivers that don't need to do anything special in module init should + * use the convenience "module_peci_driver" macro instead + * + * Return: zero on success, else a negative error code. + */ +#define peci_driver_register(driver) \ + __peci_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +void peci_driver_unregister(struct peci_driver *driver); + +/** + * module_peci_driver() - helper macro for registering a modular PECI driver + * @__peci_driver: peci_driver struct + * + * Helper macro for PECI drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. 
Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_peci_driver(__peci_driver) \ + module_driver(__peci_driver, peci_driver_register, peci_driver_unregister) + extern struct device_type peci_controller_type; int peci_controller_scan_devices(struct peci_controller *controller); diff --git a/drivers/peci/request.c b/drivers/peci/request.c index 7dee51c50dd2..a49eb351cda3 100644 --- a/drivers/peci/request.c +++ b/drivers/peci/request.c @@ -1,13 +1,140 @@ // SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2021 Intel Corporation +#include #include #include #include #include +#include + #include "internal.h" +#define PECI_GET_DIB_CMD 0xf7 +#define PECI_GET_DIB_WR_LEN 1 +#define PECI_GET_DIB_RD_LEN 8 + +#define PECI_RDPKGCFG_CMD 0xa1 +#define PECI_RDPKGCFG_WR_LEN 5 +#define PECI_RDPKGCFG_RD_LEN_BASE 1 +#define PECI_WRPKGCFG_CMD 0xa5 +#define PECI_WRPKGCFG_WR_LEN_BASE 6 +#define PECI_WRPKGCFG_RD_LEN 1 + +/* Device Specific Completion Code (CC) Definition */ +#define PECI_CC_SUCCESS 0x40 +#define PECI_CC_NEED_RETRY 0x80 +#define PECI_CC_OUT_OF_RESOURCE 0x81 +#define PECI_CC_UNAVAIL_RESOURCE 0x82 +#define PECI_CC_INVALID_REQ 0x90 +#define PECI_CC_MCA_ERROR 0x91 +#define PECI_CC_CATASTROPHIC_MCA_ERROR 0x93 +#define PECI_CC_FATAL_MCA_ERROR 0x94 +#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB 0x98 +#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR 0x9B +#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA 0x9C + +#define PECI_RETRY_BIT BIT(0) + +#define PECI_RETRY_TIMEOUT msecs_to_jiffies(700) +#define PECI_RETRY_INTERVAL_MIN msecs_to_jiffies(1) +#define PECI_RETRY_INTERVAL_MAX msecs_to_jiffies(128) + +static u8 peci_request_data_cc(struct peci_request *req) +{ + return req->rx.buf[0]; +} + +/** + * peci_request_status() - return -errno based on PECI completion code + * @req: the PECI request that contains response data with completion code + * + * It can't be used for Ping(), GetDIB() and GetTemp() - for those commands we + * don't expect completion code in the response. 
+ * + * Return: -errno + */ +int peci_request_status(struct peci_request *req) +{ + u8 cc = peci_request_data_cc(req); + + if (cc != PECI_CC_SUCCESS) + dev_dbg(&req->device->dev, "ret: %#02x\n", cc); + + switch (cc) { + case PECI_CC_SUCCESS: + return 0; + case PECI_CC_NEED_RETRY: + case PECI_CC_OUT_OF_RESOURCE: + case PECI_CC_UNAVAIL_RESOURCE: + return -EAGAIN; + case PECI_CC_INVALID_REQ: + return -EINVAL; + case PECI_CC_MCA_ERROR: + case PECI_CC_CATASTROPHIC_MCA_ERROR: + case PECI_CC_FATAL_MCA_ERROR: + case PECI_CC_PARITY_ERR_GPSB_OR_PMSB: + case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR: + case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA: + return -EIO; + } + + WARN_ONCE(1, "Unknown PECI completion code: %#02x\n", cc); + + return -EIO; +} +EXPORT_SYMBOL_NS_GPL(peci_request_status, PECI); + +static int peci_request_xfer(struct peci_request *req) +{ + struct peci_device *device = req->device; + struct peci_controller *controller = to_peci_controller(device->dev.parent); + int ret; + + mutex_lock(&controller->bus_lock); + ret = controller->ops->xfer(controller, device->addr, req); + mutex_unlock(&controller->bus_lock); + + return ret; +} + +static int peci_request_xfer_retry(struct peci_request *req) +{ + long wait_interval = PECI_RETRY_INTERVAL_MIN; + struct peci_device *device = req->device; + struct peci_controller *controller = to_peci_controller(device->dev.parent); + unsigned long start = jiffies; + int ret; + + /* Don't try to use it for ping */ + if (WARN_ON(req->tx.len == 0)) + return 0; + + do { + ret = peci_request_xfer(req); + if (ret) { + dev_dbg(&controller->dev, "xfer error: %d\n", ret); + return ret; + } + + if (peci_request_status(req) != -EAGAIN) + return 0; + + /* Set the retry bit to indicate a retry attempt */ + req->tx.buf[1] |= PECI_RETRY_BIT; + + if (schedule_timeout_interruptible(wait_interval)) + return -ERESTARTSYS; + + wait_interval = min_t(long, wait_interval * 2, PECI_RETRY_INTERVAL_MAX); + } while (time_before(jiffies, start + PECI_RETRY_TIMEOUT)); + + dev_dbg(&controller->dev, "request timed out\n"); + + return -ETIMEDOUT; +} + /** * peci_request_alloc() - allocate &struct peci_requests * @device: PECI device to which request is going to be sent @@ -53,3 +180,90 @@ void peci_request_free(struct peci_request *req) kfree(req); } EXPORT_SYMBOL_NS_GPL(peci_request_free, PECI); + +struct peci_request *peci_xfer_get_dib(struct peci_device *device) +{ + struct peci_request *req; + int ret; + + req = peci_request_alloc(device, PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN); + if (!req) + return ERR_PTR(-ENOMEM); + + req->tx.buf[0] = PECI_GET_DIB_CMD; + + ret = peci_request_xfer(req); + if (ret) { + peci_request_free(req); + return ERR_PTR(ret); + } + + return req; +} +EXPORT_SYMBOL_NS_GPL(peci_xfer_get_dib, PECI); + +static struct peci_request * +__pkg_cfg_read(struct peci_device *device, u8 index, u16 param, u8 len) +{ + struct peci_request *req; + int ret; + + req = peci_request_alloc(device, PECI_RDPKGCFG_WR_LEN, PECI_RDPKGCFG_RD_LEN_BASE + len); + if (!req) + return ERR_PTR(-ENOMEM); + + req->tx.buf[0] = PECI_RDPKGCFG_CMD; + req->tx.buf[1] = 0; + req->tx.buf[2] = index; + put_unaligned_le16(param, &req->tx.buf[3]); + + ret = peci_request_xfer_retry(req); + if (ret) { + peci_request_free(req); + return ERR_PTR(ret); + } + + return req; +} + +u8 peci_request_data_readb(struct peci_request *req) +{ + return req->rx.buf[1]; +} +EXPORT_SYMBOL_NS_GPL(peci_request_data_readb, PECI); + +u16 peci_request_data_readw(struct peci_request *req) +{ + return get_unaligned_le16(&req->rx.buf[1]); 
+} +EXPORT_SYMBOL_NS_GPL(peci_request_data_readw, PECI); + +u32 peci_request_data_readl(struct peci_request *req) +{ + return get_unaligned_le32(&req->rx.buf[1]); +} +EXPORT_SYMBOL_NS_GPL(peci_request_data_readl, PECI); + +u64 peci_request_data_readq(struct peci_request *req) +{ + return get_unaligned_le64(&req->rx.buf[1]); +} +EXPORT_SYMBOL_NS_GPL(peci_request_data_readq, PECI); + +u64 peci_request_dib_read(struct peci_request *req) +{ + return get_unaligned_le64(&req->rx.buf[0]); +} +EXPORT_SYMBOL_NS_GPL(peci_request_dib_read, PECI); + +#define __read_pkg_config(x, type) \ +struct peci_request *peci_xfer_pkg_cfg_##x(struct peci_device *device, u8 index, u16 param) \ +{ \ + return __pkg_cfg_read(device, index, param, sizeof(type)); \ +} \ +EXPORT_SYMBOL_NS_GPL(peci_xfer_pkg_cfg_##x, PECI) + +__read_pkg_config(readb, u8); +__read_pkg_config(readw, u16); +__read_pkg_config(readl, u32); +__read_pkg_config(readq, u64); diff --git a/include/linux/peci.h b/include/linux/peci.h index 7e35673f3786..4eda423ba10c 100644 --- a/include/linux/peci.h +++ b/include/linux/peci.h @@ -14,6 +14,14 @@ */ #define PECI_REQUEST_MAX_BUF_SIZE 32 +#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */ +#define PECI_PKG_ID_CPU_ID 0x0000 /* CPUID Info */ +#define PECI_PKG_ID_PLATFORM_ID 0x0001 /* Platform ID */ +#define PECI_PKG_ID_DEVICE_ID 0x0002 /* Uncore Device ID */ +#define PECI_PKG_ID_MAX_THREAD_ID 0x0003 /* Max Thread ID */ +#define PECI_PKG_ID_MICROCODE_REV 0x0004 /* CPU Microcode Update Revision */ +#define PECI_PKG_ID_MCA_ERROR_LOG 0x0005 /* Machine Check Status */ + struct peci_controller; struct peci_request; @@ -59,6 +67,11 @@ static inline struct peci_controller *to_peci_controller(void *d) * struct peci_device - PECI device * @dev: device object to register PECI device to the device model * @controller: manages the bus segment hosting this PECI device + * @info: PECI device characteristics + * @info.family: device family + * @info.model: device model + * @info.peci_revision: PECI revision supported by the PECI device + * @info.socket_id: the socket ID represented by the PECI device * @addr: address used on the PECI bus connected to the parent controller * @deleted: indicates that PECI device was already deleted * @@ -68,6 +81,12 @@ static inline struct peci_controller *to_peci_controller(void *d) */ struct peci_device { struct device dev; + struct { + u16 family; + u8 model; + u8 peci_revision; + u8 socket_id; + } info; u8 addr; bool deleted; }; -- cgit v1.2.3-71-gd317 From 93e1821c80f9460c8931dc4bc090ede794f966cd Mon Sep 17 00:00:00 2001 From: Iwona Winiarska Date: Tue, 8 Feb 2022 16:36:35 +0100 Subject: peci: Add peci-cpu driver PECI is an interface that may be used by different types of devices. Add a peci-cpu driver compatible with Intel processors. The driver is responsible for handling auxiliary devices that can subsequently be used by other drivers (e.g. hwmons). 
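A consumer binding to one of the auxiliary devices created by this driver might look like the following sketch (hypothetical driver; the match string assumes the "<modname>.<type>.<id->data>" naming that results from adev_alloc(), e.g. "peci_cpu.cputemp.icx"):

#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/peci.h>
#include <linux/peci-cpu.h>

static int cputemp_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	/* The auxiliary device's parent is the peci_device itself. */
	struct peci_device *device = to_peci_device(adev->dev.parent);
	s16 temp;
	int ret;

	ret = peci_temp_read(device, &temp);
	if (ret)
		return ret;

	dev_info(&adev->dev, "raw die temperature: %d\n", temp);

	return 0;
}

static const struct auxiliary_device_id cputemp_ids[] = {
	{ .name = "peci_cpu.cputemp.icx" },	/* hypothetical match */
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, cputemp_ids);

static struct auxiliary_driver cputemp_driver = {
	.probe = cputemp_probe,
	.id_table = cputemp_ids,
};
module_auxiliary_driver(cputemp_driver);

MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI_CPU);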
Reviewed-by: Pierre-Louis Bossart Acked-by: Joel Stanley Signed-off-by: Iwona Winiarska Link: https://lore.kernel.org/r/20220208153639.255278-10-iwona.winiarska@intel.com Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 1 + drivers/peci/Kconfig | 15 +++ drivers/peci/Makefile | 2 + drivers/peci/cpu.c | 343 +++++++++++++++++++++++++++++++++++++++++++++++ drivers/peci/device.c | 1 + drivers/peci/internal.h | 27 ++++ drivers/peci/request.c | 213 +++++++++++++++++++++++++++++ include/linux/peci-cpu.h | 40 ++++++ include/linux/peci.h | 8 -- 9 files changed, 642 insertions(+), 8 deletions(-) create mode 100644 drivers/peci/cpu.c create mode 100644 include/linux/peci-cpu.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index b8511e71e55b..32a4b891fe82 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15110,6 +15110,7 @@ L: openbmc@lists.ozlabs.org (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/peci/ F: drivers/peci/ +F: include/linux/peci-cpu.h F: include/linux/peci.h PENSANDO ETHERNET DRIVERS diff --git a/drivers/peci/Kconfig b/drivers/peci/Kconfig index 99279df97a78..89872ad83320 100644 --- a/drivers/peci/Kconfig +++ b/drivers/peci/Kconfig @@ -16,6 +16,21 @@ menuconfig PECI if PECI +config PECI_CPU + tristate "PECI CPU" + select AUXILIARY_BUS + help + This option enables peci-cpu driver for Intel processors. It is + responsible for creating auxiliary devices that can subsequently + be used by other drivers in order to perform various + functionalities such as e.g. temperature monitoring. + + Additional drivers must be enabled in order to use the functionality + of the device. + + This driver can also be built as a module. If so, the module + will be called peci-cpu. + source "drivers/peci/controller/Kconfig" endif # PECI diff --git a/drivers/peci/Makefile b/drivers/peci/Makefile index 917f689e147a..7de18137e738 100644 --- a/drivers/peci/Makefile +++ b/drivers/peci/Makefile @@ -3,6 +3,8 @@ # Core functionality peci-y := core.o request.o device.o sysfs.o obj-$(CONFIG_PECI) += peci.o +peci-cpu-y := cpu.o +obj-$(CONFIG_PECI_CPU) += peci-cpu.o # Hardware specific bus drivers obj-y += controller/ diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c new file mode 100644 index 000000000000..68eb61c65d34 --- /dev/null +++ b/drivers/peci/cpu.c @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2021 Intel Corporation + +#include +#include +#include +#include +#include + +#include "internal.h" + +/** + * peci_temp_read() - read the maximum die temperature from PECI target device + * @device: PECI device to which request is going to be sent + * @temp_raw: where to store the read temperature + * + * It uses GetTemp PECI command. + * + * Return: 0 if succeeded, other values in case errors. + */ +int peci_temp_read(struct peci_device *device, s16 *temp_raw) +{ + struct peci_request *req; + + req = peci_xfer_get_temp(device); + if (IS_ERR(req)) + return PTR_ERR(req); + + *temp_raw = peci_request_temp_read(req); + + peci_request_free(req); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(peci_temp_read, PECI_CPU); + +/** + * peci_pcs_read() - read PCS register + * @device: PECI device to which request is going to be sent + * @index: PCS index + * @param: PCS parameter + * @data: where to store the read data + * + * It uses RdPkgConfig PECI command. + * + * Return: 0 if succeeded, other values in case errors. 
+ */ +int peci_pcs_read(struct peci_device *device, u8 index, u16 param, u32 *data) +{ + struct peci_request *req; + int ret; + + req = peci_xfer_pkg_cfg_readl(device, index, param); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = peci_request_status(req); + if (ret) + goto out_req_free; + + *data = peci_request_data_readl(req); +out_req_free: + peci_request_free(req); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(peci_pcs_read, PECI_CPU); + +/** + * peci_pci_local_read() - read 32-bit memory location using raw address + * @device: PECI device to which request is going to be sent + * @bus: bus + * @dev: device + * @func: function + * @reg: register + * @data: where to store the read data + * + * It uses RdPCIConfigLocal PECI command. + * + * Return: 0 if succeeded, other values in case errors. + */ +int peci_pci_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func, + u16 reg, u32 *data) +{ + struct peci_request *req; + int ret; + + req = peci_xfer_pci_cfg_local_readl(device, bus, dev, func, reg); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = peci_request_status(req); + if (ret) + goto out_req_free; + + *data = peci_request_data_readl(req); +out_req_free: + peci_request_free(req); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(peci_pci_local_read, PECI_CPU); + +/** + * peci_ep_pci_local_read() - read 32-bit memory location using raw address + * @device: PECI device to which request is going to be sent + * @seg: PCI segment + * @bus: bus + * @dev: device + * @func: function + * @reg: register + * @data: where to store the read data + * + * Like &peci_pci_local_read, but it uses RdEndpointConfig PECI command. + * + * Return: 0 if succeeded, other values in case errors. + */ +int peci_ep_pci_local_read(struct peci_device *device, u8 seg, + u8 bus, u8 dev, u8 func, u16 reg, u32 *data) +{ + struct peci_request *req; + int ret; + + req = peci_xfer_ep_pci_cfg_local_readl(device, seg, bus, dev, func, reg); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = peci_request_status(req); + if (ret) + goto out_req_free; + + *data = peci_request_data_readl(req); +out_req_free: + peci_request_free(req); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(peci_ep_pci_local_read, PECI_CPU); + +/** + * peci_mmio_read() - read 32-bit memory location using 64-bit bar offset address + * @device: PECI device to which request is going to be sent + * @bar: PCI bar + * @seg: PCI segment + * @bus: bus + * @dev: device + * @func: function + * @address: 64-bit MMIO address + * @data: where to store the read data + * + * It uses RdEndpointConfig PECI command. + * + * Return: 0 if succeeded, other values in case errors. 
+ */ +int peci_mmio_read(struct peci_device *device, u8 bar, u8 seg, + u8 bus, u8 dev, u8 func, u64 address, u32 *data) +{ + struct peci_request *req; + int ret; + + req = peci_xfer_ep_mmio64_readl(device, bar, seg, bus, dev, func, address); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = peci_request_status(req); + if (ret) + goto out_req_free; + + *data = peci_request_data_readl(req); +out_req_free: + peci_request_free(req); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(peci_mmio_read, PECI_CPU); + +static const char * const peci_adev_types[] = { + "cputemp", + "dimmtemp", +}; + +struct peci_cpu { + struct peci_device *device; + const struct peci_device_id *id; +}; + +static void adev_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + + auxiliary_device_uninit(adev); + + kfree(adev->name); + kfree(adev); +} + +static struct auxiliary_device *adev_alloc(struct peci_cpu *priv, int idx) +{ + struct peci_controller *controller = to_peci_controller(priv->device->dev.parent); + struct auxiliary_device *adev; + const char *name; + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return ERR_PTR(-ENOMEM); + + name = kasprintf(GFP_KERNEL, "%s.%s", peci_adev_types[idx], (const char *)priv->id->data); + if (!name) { + ret = -ENOMEM; + goto free_adev; + } + + adev->name = name; + adev->dev.parent = &priv->device->dev; + adev->dev.release = adev_release; + adev->id = (controller->id << 16) | (priv->device->addr); + + ret = auxiliary_device_init(adev); + if (ret) + goto free_name; + + return adev; + +free_name: + kfree(name); +free_adev: + kfree(adev); + return ERR_PTR(ret); +} + +static void unregister_adev(void *_adev) +{ + struct auxiliary_device *adev = _adev; + + auxiliary_device_delete(adev); +} + +static int devm_adev_add(struct device *dev, int idx) +{ + struct peci_cpu *priv = dev_get_drvdata(dev); + struct auxiliary_device *adev; + int ret; + + adev = adev_alloc(priv, idx); + if (IS_ERR(adev)) + return PTR_ERR(adev); + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ret; + } + + ret = devm_add_action_or_reset(&priv->device->dev, unregister_adev, adev); + if (ret) + return ret; + + return 0; +} + +static void peci_cpu_add_adevices(struct peci_cpu *priv) +{ + struct device *dev = &priv->device->dev; + int ret, i; + + for (i = 0; i < ARRAY_SIZE(peci_adev_types); i++) { + ret = devm_adev_add(dev, i); + if (ret) { + dev_warn(dev, "Failed to register PECI auxiliary: %s, ret = %d\n", + peci_adev_types[i], ret); + continue; + } + } +} + +static int +peci_cpu_probe(struct peci_device *device, const struct peci_device_id *id) +{ + struct device *dev = &device->dev; + struct peci_cpu *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev_set_drvdata(dev, priv); + priv->device = device; + priv->id = id; + + peci_cpu_add_adevices(priv); + + return 0; +} + +static const struct peci_device_id peci_cpu_device_ids[] = { + { /* Haswell Xeon */ + .family = 6, + .model = INTEL_FAM6_HASWELL_X, + .data = "hsx", + }, + { /* Broadwell Xeon */ + .family = 6, + .model = INTEL_FAM6_BROADWELL_X, + .data = "bdx", + }, + { /* Broadwell Xeon D */ + .family = 6, + .model = INTEL_FAM6_BROADWELL_D, + .data = "bdxd", + }, + { /* Skylake Xeon */ + .family = 6, + .model = INTEL_FAM6_SKYLAKE_X, + .data = "skx", + }, + { /* Icelake Xeon */ + .family = 6, + .model = INTEL_FAM6_ICELAKE_X, + .data = "icx", + }, + { /* Icelake Xeon D */ + .family = 6, + .model = INTEL_FAM6_ICELAKE_D, + 
.data = "icxd", + }, + { } +}; +MODULE_DEVICE_TABLE(peci, peci_cpu_device_ids); + +static struct peci_driver peci_cpu_driver = { + .probe = peci_cpu_probe, + .id_table = peci_cpu_device_ids, + .driver = { + .name = "peci-cpu", + }, +}; +module_peci_driver(peci_cpu_driver); + +MODULE_AUTHOR("Iwona Winiarska "); +MODULE_DESCRIPTION("PECI CPU driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(PECI); diff --git a/drivers/peci/device.c b/drivers/peci/device.c index 184b5e650b0b..e6b0bffb14f4 100644 --- a/drivers/peci/device.c +++ b/drivers/peci/device.c @@ -3,6 +3,7 @@ #include #include +#include #include #include "internal.h" diff --git a/drivers/peci/internal.h b/drivers/peci/internal.h index 52c02e12874f..9d75ea54504c 100644 --- a/drivers/peci/internal.h +++ b/drivers/peci/internal.h @@ -22,6 +22,7 @@ void peci_request_free(struct peci_request *req); int peci_request_status(struct peci_request *req); u64 peci_request_dib_read(struct peci_request *req); +s16 peci_request_temp_read(struct peci_request *req); u8 peci_request_data_readb(struct peci_request *req); u16 peci_request_data_readw(struct peci_request *req); @@ -36,6 +37,32 @@ struct peci_request *peci_xfer_pkg_cfg_readw(struct peci_device *device, u8 inde struct peci_request *peci_xfer_pkg_cfg_readl(struct peci_device *device, u8 index, u16 param); struct peci_request *peci_xfer_pkg_cfg_readq(struct peci_device *device, u8 index, u16 param); +struct peci_request *peci_xfer_pci_cfg_local_readb(struct peci_device *device, + u8 bus, u8 dev, u8 func, u16 reg); +struct peci_request *peci_xfer_pci_cfg_local_readw(struct peci_device *device, + u8 bus, u8 dev, u8 func, u16 reg); +struct peci_request *peci_xfer_pci_cfg_local_readl(struct peci_device *device, + u8 bus, u8 dev, u8 func, u16 reg); + +struct peci_request *peci_xfer_ep_pci_cfg_local_readb(struct peci_device *device, u8 seg, + u8 bus, u8 dev, u8 func, u16 reg); +struct peci_request *peci_xfer_ep_pci_cfg_local_readw(struct peci_device *device, u8 seg, + u8 bus, u8 dev, u8 func, u16 reg); +struct peci_request *peci_xfer_ep_pci_cfg_local_readl(struct peci_device *device, u8 seg, + u8 bus, u8 dev, u8 func, u16 reg); + +struct peci_request *peci_xfer_ep_pci_cfg_readb(struct peci_device *device, u8 seg, + u8 bus, u8 dev, u8 func, u16 reg); +struct peci_request *peci_xfer_ep_pci_cfg_readw(struct peci_device *device, u8 seg, + u8 bus, u8 dev, u8 func, u16 reg); +struct peci_request *peci_xfer_ep_pci_cfg_readl(struct peci_device *device, u8 seg, + u8 bus, u8 dev, u8 func, u16 reg); + +struct peci_request *peci_xfer_ep_mmio32_readl(struct peci_device *device, u8 bar, u8 seg, + u8 bus, u8 dev, u8 func, u64 offset); + +struct peci_request *peci_xfer_ep_mmio64_readl(struct peci_device *device, u8 bar, u8 seg, + u8 bus, u8 dev, u8 func, u64 offset); /** * struct peci_device_id - PECI device data to match * @data: pointer to driver private data specific to device diff --git a/drivers/peci/request.c b/drivers/peci/request.c index a49eb351cda3..8d6dd7b6b559 100644 --- a/drivers/peci/request.c +++ b/drivers/peci/request.c @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -15,6 +16,10 @@ #define PECI_GET_DIB_WR_LEN 1 #define PECI_GET_DIB_RD_LEN 8 +#define PECI_GET_TEMP_CMD 0x01 +#define PECI_GET_TEMP_WR_LEN 1 +#define PECI_GET_TEMP_RD_LEN 2 + #define PECI_RDPKGCFG_CMD 0xa1 #define PECI_RDPKGCFG_WR_LEN 5 #define PECI_RDPKGCFG_RD_LEN_BASE 1 @@ -22,6 +27,45 @@ #define PECI_WRPKGCFG_WR_LEN_BASE 6 #define PECI_WRPKGCFG_RD_LEN 1 +#define PECI_RDIAMSR_CMD 0xb1 +#define 
PECI_RDIAMSR_WR_LEN 5 +#define PECI_RDIAMSR_RD_LEN 9 +#define PECI_WRIAMSR_CMD 0xb5 +#define PECI_RDIAMSREX_CMD 0xd1 +#define PECI_RDIAMSREX_WR_LEN 6 +#define PECI_RDIAMSREX_RD_LEN 9 + +#define PECI_RDPCICFG_CMD 0x61 +#define PECI_RDPCICFG_WR_LEN 6 +#define PECI_RDPCICFG_RD_LEN 5 +#define PECI_RDPCICFG_RD_LEN_MAX 24 +#define PECI_WRPCICFG_CMD 0x65 + +#define PECI_RDPCICFGLOCAL_CMD 0xe1 +#define PECI_RDPCICFGLOCAL_WR_LEN 5 +#define PECI_RDPCICFGLOCAL_RD_LEN_BASE 1 +#define PECI_WRPCICFGLOCAL_CMD 0xe5 +#define PECI_WRPCICFGLOCAL_WR_LEN_BASE 6 +#define PECI_WRPCICFGLOCAL_RD_LEN 1 + +#define PECI_ENDPTCFG_TYPE_LOCAL_PCI 0x03 +#define PECI_ENDPTCFG_TYPE_PCI 0x04 +#define PECI_ENDPTCFG_TYPE_MMIO 0x05 +#define PECI_ENDPTCFG_ADDR_TYPE_PCI 0x04 +#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_D 0x05 +#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q 0x06 +#define PECI_RDENDPTCFG_CMD 0xc1 +#define PECI_RDENDPTCFG_PCI_WR_LEN 12 +#define PECI_RDENDPTCFG_MMIO_WR_LEN_BASE 10 +#define PECI_RDENDPTCFG_MMIO_D_WR_LEN 14 +#define PECI_RDENDPTCFG_MMIO_Q_WR_LEN 18 +#define PECI_RDENDPTCFG_RD_LEN_BASE 1 +#define PECI_WRENDPTCFG_CMD 0xc5 +#define PECI_WRENDPTCFG_PCI_WR_LEN_BASE 13 +#define PECI_WRENDPTCFG_MMIO_D_WR_LEN_BASE 15 +#define PECI_WRENDPTCFG_MMIO_Q_WR_LEN_BASE 19 +#define PECI_WRENDPTCFG_RD_LEN 1 + /* Device Specific Completion Code (CC) Definition */ #define PECI_CC_SUCCESS 0x40 #define PECI_CC_NEED_RETRY 0x80 @@ -202,6 +246,27 @@ struct peci_request *peci_xfer_get_dib(struct peci_device *device) } EXPORT_SYMBOL_NS_GPL(peci_xfer_get_dib, PECI); +struct peci_request *peci_xfer_get_temp(struct peci_device *device) +{ + struct peci_request *req; + int ret; + + req = peci_request_alloc(device, PECI_GET_TEMP_WR_LEN, PECI_GET_TEMP_RD_LEN); + if (!req) + return ERR_PTR(-ENOMEM); + + req->tx.buf[0] = PECI_GET_TEMP_CMD; + + ret = peci_request_xfer(req); + if (ret) { + peci_request_free(req); + return ERR_PTR(ret); + } + + return req; +} +EXPORT_SYMBOL_NS_GPL(peci_xfer_get_temp, PECI); + static struct peci_request * __pkg_cfg_read(struct peci_device *device, u8 index, u16 param, u8 len) { @@ -226,6 +291,108 @@ __pkg_cfg_read(struct peci_device *device, u8 index, u16 param, u8 len) return req; } +static u32 __get_pci_addr(u8 bus, u8 dev, u8 func, u16 reg) +{ + return reg | PCI_DEVID(bus, PCI_DEVFN(dev, func)) << 12; +} + +static struct peci_request * +__pci_cfg_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg, u8 len) +{ + struct peci_request *req; + u32 pci_addr; + int ret; + + req = peci_request_alloc(device, PECI_RDPCICFGLOCAL_WR_LEN, + PECI_RDPCICFGLOCAL_RD_LEN_BASE + len); + if (!req) + return ERR_PTR(-ENOMEM); + + pci_addr = __get_pci_addr(bus, dev, func, reg); + + req->tx.buf[0] = PECI_RDPCICFGLOCAL_CMD; + req->tx.buf[1] = 0; + put_unaligned_le24(pci_addr, &req->tx.buf[2]); + + ret = peci_request_xfer_retry(req); + if (ret) { + peci_request_free(req); + return ERR_PTR(ret); + } + + return req; +} + +static struct peci_request * +__ep_pci_cfg_read(struct peci_device *device, u8 msg_type, u8 seg, + u8 bus, u8 dev, u8 func, u16 reg, u8 len) +{ + struct peci_request *req; + u32 pci_addr; + int ret; + + req = peci_request_alloc(device, PECI_RDENDPTCFG_PCI_WR_LEN, + PECI_RDENDPTCFG_RD_LEN_BASE + len); + if (!req) + return ERR_PTR(-ENOMEM); + + pci_addr = __get_pci_addr(bus, dev, func, reg); + + req->tx.buf[0] = PECI_RDENDPTCFG_CMD; + req->tx.buf[1] = 0; + req->tx.buf[2] = msg_type; + req->tx.buf[3] = 0; + req->tx.buf[4] = 0; + req->tx.buf[5] = 0; + req->tx.buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI; + req->tx.buf[7] 
= seg; /* PCI Segment */ + put_unaligned_le32(pci_addr, &req->tx.buf[8]); + + ret = peci_request_xfer_retry(req); + if (ret) { + peci_request_free(req); + return ERR_PTR(ret); + } + + return req; +} + +static struct peci_request * +__ep_mmio_read(struct peci_device *device, u8 bar, u8 addr_type, u8 seg, + u8 bus, u8 dev, u8 func, u64 offset, u8 tx_len, u8 len) +{ + struct peci_request *req; + int ret; + + req = peci_request_alloc(device, tx_len, PECI_RDENDPTCFG_RD_LEN_BASE + len); + if (!req) + return ERR_PTR(-ENOMEM); + + req->tx.buf[0] = PECI_RDENDPTCFG_CMD; + req->tx.buf[1] = 0; + req->tx.buf[2] = PECI_ENDPTCFG_TYPE_MMIO; + req->tx.buf[3] = 0; /* Endpoint ID */ + req->tx.buf[4] = 0; /* Reserved */ + req->tx.buf[5] = bar; + req->tx.buf[6] = addr_type; + req->tx.buf[7] = seg; /* PCI Segment */ + req->tx.buf[8] = PCI_DEVFN(dev, func); + req->tx.buf[9] = bus; /* PCI Bus */ + + if (addr_type == PECI_ENDPTCFG_ADDR_TYPE_MMIO_D) + put_unaligned_le32(offset, &req->tx.buf[10]); + else + put_unaligned_le64(offset, &req->tx.buf[10]); + + ret = peci_request_xfer_retry(req); + if (ret) { + peci_request_free(req); + return ERR_PTR(ret); + } + + return req; +} + u8 peci_request_data_readb(struct peci_request *req) { return req->rx.buf[1]; @@ -256,6 +423,12 @@ u64 peci_request_dib_read(struct peci_request *req) } EXPORT_SYMBOL_NS_GPL(peci_request_dib_read, PECI); +s16 peci_request_temp_read(struct peci_request *req) +{ + return get_unaligned_le16(&req->rx.buf[0]); +} +EXPORT_SYMBOL_NS_GPL(peci_request_temp_read, PECI); + #define __read_pkg_config(x, type) \ struct peci_request *peci_xfer_pkg_cfg_##x(struct peci_device *device, u8 index, u16 param) \ { \ @@ -267,3 +440,43 @@ __read_pkg_config(readb, u8); __read_pkg_config(readw, u16); __read_pkg_config(readl, u32); __read_pkg_config(readq, u64); + +#define __read_pci_config_local(x, type) \ +struct peci_request * \ +peci_xfer_pci_cfg_local_##x(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg) \ +{ \ + return __pci_cfg_local_read(device, bus, dev, func, reg, sizeof(type)); \ +} \ +EXPORT_SYMBOL_NS_GPL(peci_xfer_pci_cfg_local_##x, PECI) + +__read_pci_config_local(readb, u8); +__read_pci_config_local(readw, u16); +__read_pci_config_local(readl, u32); + +#define __read_ep_pci_config(x, msg_type, type) \ +struct peci_request * \ +peci_xfer_ep_pci_cfg_##x(struct peci_device *device, u8 seg, u8 bus, u8 dev, u8 func, u16 reg) \ +{ \ + return __ep_pci_cfg_read(device, msg_type, seg, bus, dev, func, reg, sizeof(type)); \ +} \ +EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_pci_cfg_##x, PECI) + +__read_ep_pci_config(local_readb, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u8); +__read_ep_pci_config(local_readw, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u16); +__read_ep_pci_config(local_readl, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u32); +__read_ep_pci_config(readb, PECI_ENDPTCFG_TYPE_PCI, u8); +__read_ep_pci_config(readw, PECI_ENDPTCFG_TYPE_PCI, u16); +__read_ep_pci_config(readl, PECI_ENDPTCFG_TYPE_PCI, u32); + +#define __read_ep_mmio(x, y, addr_type, type1, type2) \ +struct peci_request *peci_xfer_ep_mmio##y##_##x(struct peci_device *device, u8 bar, u8 seg, \ + u8 bus, u8 dev, u8 func, u64 offset) \ +{ \ + return __ep_mmio_read(device, bar, addr_type, seg, bus, dev, func, \ + offset, PECI_RDENDPTCFG_MMIO_WR_LEN_BASE + sizeof(type1), \ + sizeof(type2)); \ +} \ +EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_mmio##y##_##x, PECI) + +__read_ep_mmio(readl, 32, PECI_ENDPTCFG_ADDR_TYPE_MMIO_D, u32, u32); +__read_ep_mmio(readl, 64, PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q, u64, u32); diff --git a/include/linux/peci-cpu.h 
b/include/linux/peci-cpu.h new file mode 100644 index 000000000000..ff8ae9c26c80 --- /dev/null +++ b/include/linux/peci-cpu.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2021 Intel Corporation */ + +#ifndef __LINUX_PECI_CPU_H +#define __LINUX_PECI_CPU_H + +#include + +#include "../../arch/x86/include/asm/intel-family.h" + +#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */ +#define PECI_PKG_ID_CPU_ID 0x0000 /* CPUID Info */ +#define PECI_PKG_ID_PLATFORM_ID 0x0001 /* Platform ID */ +#define PECI_PKG_ID_DEVICE_ID 0x0002 /* Uncore Device ID */ +#define PECI_PKG_ID_MAX_THREAD_ID 0x0003 /* Max Thread ID */ +#define PECI_PKG_ID_MICROCODE_REV 0x0004 /* CPU Microcode Update Revision */ +#define PECI_PKG_ID_MCA_ERROR_LOG 0x0005 /* Machine Check Status */ +#define PECI_PCS_MODULE_TEMP 9 /* Per Core DTS Temperature Read */ +#define PECI_PCS_THERMAL_MARGIN 10 /* DTS thermal margin */ +#define PECI_PCS_DDR_DIMM_TEMP 14 /* DDR DIMM Temperature */ +#define PECI_PCS_TEMP_TARGET 16 /* Temperature Target Read */ +#define PECI_PCS_TDP_UNITS 30 /* Units for power/energy registers */ + +struct peci_device; + +int peci_temp_read(struct peci_device *device, s16 *temp_raw); + +int peci_pcs_read(struct peci_device *device, u8 index, + u16 param, u32 *data); + +int peci_pci_local_read(struct peci_device *device, u8 bus, u8 dev, + u8 func, u16 reg, u32 *data); + +int peci_ep_pci_local_read(struct peci_device *device, u8 seg, + u8 bus, u8 dev, u8 func, u16 reg, u32 *data); + +int peci_mmio_read(struct peci_device *device, u8 bar, u8 seg, + u8 bus, u8 dev, u8 func, u64 address, u32 *data); + +#endif /* __LINUX_PECI_CPU_H */ diff --git a/include/linux/peci.h b/include/linux/peci.h index 4eda423ba10c..06e6ef935297 100644 --- a/include/linux/peci.h +++ b/include/linux/peci.h @@ -14,14 +14,6 @@ */ #define PECI_REQUEST_MAX_BUF_SIZE 32 -#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */ -#define PECI_PKG_ID_CPU_ID 0x0000 /* CPUID Info */ -#define PECI_PKG_ID_PLATFORM_ID 0x0001 /* Platform ID */ -#define PECI_PKG_ID_DEVICE_ID 0x0002 /* Uncore Device ID */ -#define PECI_PKG_ID_MAX_THREAD_ID 0x0003 /* Max Thread ID */ -#define PECI_PKG_ID_MICROCODE_REV 0x0004 /* CPU Microcode Update Revision */ -#define PECI_PKG_ID_MCA_ERROR_LOG 0x0005 /* Machine Check Status */ - struct peci_controller; struct peci_request; -- cgit v1.2.3-71-gd317 From 4f774c4a65bf3987d1a95c966e884f38c8a942af Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 27 Jan 2022 19:25:53 -0800 Subject: cpufreq: Reintroduce ready() callback This effectively revert '4bf8e582119e ("cpufreq: Remove ready() callback")', in order to reintroduce the ready callback. This is needed in order to be able to leave the thermal pressure interrupts in the Qualcomm CPUfreq driver disabled during initialization, so that it doesn't fire while related_cpus are still 0. 
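For instance, a driver that must keep an interrupt masked until related_cpus is valid could wire it up along these lines (hypothetical driver and IRQ setup; only the .ready usage is the point here):

#include <linux/cpufreq.h>
#include <linux/interrupt.h>

static int example_irq;	/* hypothetical IRQ requested (disabled) in .init */

static void example_cpufreq_ready(struct cpufreq_policy *policy)
{
	/*
	 * The policy is fully initialized at this point, so
	 * policy->related_cpus is valid and the handler may safely run.
	 */
	enable_irq(example_irq);
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name = "example-cpufreq",
	.ready = example_cpufreq_ready,
	/* .init, .verify and a target callback are required but omitted here. */
};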
Signed-off-by: Bjorn Andersson [ Viresh: Added the Chinese translation as well and updated commit msg ] Signed-off-by: Viresh Kumar --- Documentation/cpu-freq/cpu-drivers.rst | 3 +++ Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst | 2 ++ drivers/cpufreq/cpufreq.c | 4 ++++ include/linux/cpufreq.h | 3 +++ 4 files changed, 12 insertions(+) (limited to 'include/linux')

diff --git a/Documentation/cpu-freq/cpu-drivers.rst b/Documentation/cpu-freq/cpu-drivers.rst index 3b32336a7803..d84ededb66f9 100644 --- a/Documentation/cpu-freq/cpu-drivers.rst +++ b/Documentation/cpu-freq/cpu-drivers.rst @@ -75,6 +75,9 @@ And optionally .resume - A pointer to a per-policy resume function which is called with interrupts disabled and _before_ the governor is started again. + .ready - A pointer to a per-policy ready function which is called after + the policy is fully initialized. + .attr - A pointer to a NULL-terminated list of "struct freq_attr" which allow to export values to sysfs.

diff --git a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst index 87a36044f828..2ca92042767b 100644 --- a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst +++ b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst @@ -84,6 +84,8 @@ CPUfreq核心层注册一个cpufreq_driver结构体。 .resume - 一个指向per-policy恢复函数的指针,该函数在关中断且在调节器再一次启动前被 调用。 + .ready - 一个指向per-policy准备函数的指针,该函数在策略完全初始化之后被调用。 + .attr - 一个指向NULL结尾的"struct freq_attr"列表的指针,该列表允许导出值到 sysfs。

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b8d95536ee22..80f535cc8a75 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1518,6 +1518,10 @@ static int cpufreq_online(unsigned int cpu) kobject_uevent(&policy->kobj, KOBJ_ADD); + /* Callback for handling stuff after policy is ready */ + if (cpufreq_driver->ready) + cpufreq_driver->ready(policy); + if (cpufreq_thermal_control_enabled(cpufreq_driver)) policy->cdev = of_cpufreq_cooling_register(policy);

diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 1ab29e61b078..3522a272b74d 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -382,6 +382,9 @@ struct cpufreq_driver { int (*suspend)(struct cpufreq_policy *policy); int (*resume)(struct cpufreq_policy *policy); + /* Will be called after the driver is fully initialized */ + void (*ready)(struct cpufreq_policy *policy); + struct freq_attr **attr; /* platform specific boost support code */ -- cgit v1.2.3-71-gd317

From 8cba323437a49a45756d661f500b324fc2d486fe Mon Sep 17 00:00:00 2001 From: Sean Nyekjaer Date: Tue, 8 Feb 2022 09:52:13 +0100 Subject: mtd: rawnand: protect access to rawnand devices while in suspend

Prevent rawnand access while in a suspended state. Commit 013e6292aaf5 ("mtd: rawnand: Simplify the locking") allows the rawnand layer to return errors rather than waiting in a blocking wait. Tested on an iMX6ULL.
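The locking scheme applied below reduces to the following pattern (a generic sketch, not the driver code itself): accessors loop until the device leaves suspend, and resume wakes all waiters:

#include <linux/mutex.h>
#include <linux/wait.h>

struct dev_state {
	struct mutex lock;
	bool suspended;
	wait_queue_head_t resume_wq;
};

/* mutex_init() and init_waitqueue_head() are assumed to run at probe time. */

/* Sleep until the device is resumed, then return with the lock held. */
static void dev_get(struct dev_state *s)
{
	while (1) {
		mutex_lock(&s->lock);
		if (!s->suspended)
			return;
		mutex_unlock(&s->lock);

		wait_event(s->resume_wq, !s->suspended);
	}
}

static void dev_resume(struct dev_state *s)
{
	mutex_lock(&s->lock);
	s->suspended = false;
	mutex_unlock(&s->lock);

	wake_up_all(&s->resume_wq);
}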
Fixes: 013e6292aaf5 ("mtd: rawnand: Simplify the locking") Signed-off-by: Sean Nyekjaer Reviewed-by: Boris Brezillon Cc: stable@vger.kernel.org Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20220208085213.1838273-1-sean@geanix.com --- drivers/mtd/nand/raw/nand_base.c | 44 ++++++++++++++++++---------------------- include/linux/mtd/rawnand.h | 2 ++ 2 files changed, 22 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 3e4a525ac3ca..612ae60e9763 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -338,16 +338,19 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) * * Return: -EBUSY if the chip has been suspended, 0 otherwise */ -static int nand_get_device(struct nand_chip *chip) +static void nand_get_device(struct nand_chip *chip) { - mutex_lock(&chip->lock); - if (chip->suspended) { + /* Wait until the device is resumed. */ + while (1) { + mutex_lock(&chip->lock); + if (!chip->suspended) { + mutex_lock(&chip->controller->lock); + return; + } mutex_unlock(&chip->lock); - return -EBUSY; - } - mutex_lock(&chip->controller->lock); - return 0; + wait_event(chip->resume_wq, !chip->suspended); + } } /** @@ -576,9 +579,7 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs) nand_erase_nand(chip, &einfo, 0); /* Write bad block marker to OOB */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); ret = nand_markbad_bbm(chip, ofs); nand_release_device(chip); @@ -3826,9 +3827,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from, ops->mode != MTD_OPS_RAW) return -ENOTSUPP; - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); if (!ops->datbuf) ret = nand_do_read_oob(chip, from, ops); @@ -4415,13 +4414,11 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { struct nand_chip *chip = mtd_to_nand(mtd); - int ret; + int ret = 0; ops->retlen = 0; - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); switch (ops->mode) { case MTD_OPS_PLACE_OOB: @@ -4481,9 +4478,7 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, return -EIO; /* Grab the lock and see if the device is available */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); /* Shift to get first page */ page = (int)(instr->addr >> chip->page_shift); @@ -4570,7 +4565,7 @@ static void nand_sync(struct mtd_info *mtd) pr_debug("%s: called\n", __func__); /* Grab the lock and see if the device is available */ - WARN_ON(nand_get_device(chip)); + nand_get_device(chip); /* Release it and go back */ nand_release_device(chip); } @@ -4587,9 +4582,7 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) int ret; /* Select the NAND device */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); nand_select_target(chip, chipnr); @@ -4660,6 +4653,8 @@ static void nand_resume(struct mtd_info *mtd) __func__); } mutex_unlock(&chip->lock); + + wake_up_all(&chip->resume_wq); } /** @@ -5438,6 +5433,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, chip->cur_cs = -1; mutex_init(&chip->lock); + init_waitqueue_head(&chip->resume_wq); /* Enforce the right timings for reset/detection */ chip->current_interface_config = nand_get_reset_interface_config(); diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 
5b88cd51fadb..dcf90144d70b 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1240,6 +1240,7 @@ struct nand_secure_region { * @lock: Lock protecting the suspended field. Also used to serialize accesses * to the NAND device * @suspended: Set to 1 when the device is suspended, 0 when it's not + * @resume_wq: wait queue to sleep if rawnand is in suspended state. * @cur_cs: Currently selected target. -1 means no target selected, otherwise we * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets(). * NAND Controller drivers should not modify this value, but they're @@ -1294,6 +1295,7 @@ struct nand_chip { /* Internals */ struct mutex lock; unsigned int suspended : 1; + wait_queue_head_t resume_wq; int cur_cs; int read_retries; struct nand_secure_region *secure_regions; -- cgit v1.2.3-71-gd317

From 5145abeb0649acf810a32e63bd762e617a9b3309 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 16 Dec 2021 12:16:41 +0100 Subject: mtd: nand: ecc: Provide a helper to retrieve a pipelined engine device

In a pipelined engine situation, we might either have the host which internally has support for error correction, or have it using an external hardware block for this purpose. In the former case, the host is also the ECC engine. In the latter case, it is not. In order to get the right pointers on the right devices (for example: in order to devm_* allocate variables), let's introduce this helper which can safely be called by pipelined ECC engines in order to retrieve the right device structure.

Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20211216111654.238086-16-miquel.raynal@bootlin.com --- drivers/mtd/nand/ecc.c | 31 +++++++++++++++++++++++++++++++ include/linux/mtd/nand.h | 1 + 2 files changed, 32 insertions(+) (limited to 'include/linux')

diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c index 078f5ec38de3..5250764cedee 100644 --- a/drivers/mtd/nand/ecc.c +++ b/drivers/mtd/nand/ecc.c @@ -699,6 +699,37 @@ void nand_ecc_put_on_host_hw_engine(struct nand_device *nand) } EXPORT_SYMBOL(nand_ecc_put_on_host_hw_engine); +/* + * In the case of a pipelined engine, the device registering the ECC + * engine is not necessarily the ECC engine itself but may be a host controller. + * It is then useful to provide a helper to retrieve the right device object + * which actually represents the ECC engine. + */ +struct device *nand_ecc_get_engine_dev(struct device *host) +{ + struct platform_device *ecc_pdev; + struct device_node *np; + + /* + * If the device node contains this property, it means we need to follow + * it in order to get the right ECC engine device we are looking for.
+ */ + np = of_parse_phandle(host->of_node, "nand-ecc-engine", 0); + if (!np) + return host; + + ecc_pdev = of_find_device_by_node(np); + if (!ecc_pdev) { + of_node_put(np); + return NULL; + } + + platform_device_put(ecc_pdev); + of_node_put(np); + + return &ecc_pdev->dev; +} + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Miquel Raynal "); MODULE_DESCRIPTION("Generic ECC engine"); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index b617efa0a881..615b3e3a3920 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -309,6 +309,7 @@ struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand); struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand); struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand); void nand_ecc_put_on_host_hw_engine(struct nand_device *nand); +struct device *nand_ecc_get_engine_dev(struct device *host); #if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING) struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void); -- cgit v1.2.3-71-gd317 From 4398693a9e24bcab0b99ea219073917991d0792b Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 8 Feb 2022 11:48:31 +0100 Subject: gpiolib: make struct comments into real kernel docs We have several comments that start with '/**' but don't conform to the kernel doc standard. Add proper detailed descriptions for the affected definitions and move the docs from the forward declarations to the struct definitions where applicable. Reported-by: Randy Dunlap Signed-off-by: Bartosz Golaszewski Reviewed-by: Andy Shevchenko Tested-by: Randy Dunlap --- drivers/gpio/gpiolib.h | 34 ++++++++++++++++++++++++++++++++++ include/linux/gpio/consumer.h | 35 ++++++++++++++++------------------- 2 files changed, 50 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index e1fff54a937d..9380b62e152c 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -37,6 +37,9 @@ * or name of the IP component in a System on Chip. * @data: per-instance data assigned by the driver * @list: links gpio_device:s together for traversal + * @notifier: used to notify subscribers about lines being requested, released + * or reconfigured + * @pin_ranges: range of pins served by the GPIO driver * * This state container holds most of the runtime variable data * for a GPIO device and can hold references and live on after the @@ -72,6 +75,20 @@ struct gpio_device { /* gpio suffixes used for ACPI and device tree lookup */ static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" }; +/** + * struct gpio_array - Opaque descriptor for a structure of GPIO array attributes + * + * @desc: Array of pointers to the GPIO descriptors + * @size: Number of elements in desc + * @chip: Parent GPIO chip + * @get_mask: Get mask used in fastpath + * @set_mask: Set mask used in fastpath + * @invert_mask: Invert mask used in fastpath + * + * This structure is attached to struct gpiod_descs obtained from + * gpiod_get_array() and can be passed back to get/set array functions in order + * to activate fast processing path if applicable. 
+ */ struct gpio_array { struct gpio_desc **desc; unsigned int size; @@ -103,6 +120,23 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, extern spinlock_t gpio_lock; extern struct list_head gpio_devices; + +/** + * struct gpio_desc - Opaque descriptor for a GPIO + * + * @gdev: Pointer to the parent GPIO device + * @flags: Binary descriptor flags + * @label: Name of the consumer + * @name: Line name + * @hog: Pointer to the device node that hogs this line (if any) + * @debounce_period_us: Debounce period in microseconds + * + * These are obtained using gpiod_get() and are preferable to the old + * integer-based handles. + * + * Contrary to integers, a pointer to a &struct gpio_desc is guaranteed to be + * valid until the GPIO is released. + */ struct gpio_desc { struct gpio_device *gdev; unsigned long flags; diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 3ad67b4a72be..c3aa8b330e1c 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -8,27 +8,16 @@ #include struct device; - -/** - * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are - * preferable to the old integer-based handles. - * - * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid - * until the GPIO is released. - */ struct gpio_desc; - -/** - * Opaque descriptor for a structure of GPIO array attributes. This structure - * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be - * passed back to get/set array functions in order to activate fast processing - * path if applicable. - */ struct gpio_array; /** - * Struct containing an array of descriptors that can be obtained using - * gpiod_get_array(). + * struct gpio_descs - Struct containing an array of descriptors that can be + * obtained using gpiod_get_array() + * + * @info: Pointer to the opaque gpio_array structure + * @ndescs: Number of held descriptors + * @desc: Array of pointers to GPIO descriptors */ struct gpio_descs { struct gpio_array *info; @@ -43,8 +32,16 @@ struct gpio_descs { #define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4) /** - * Optional flags that can be passed to one of gpiod_* to configure direction - * and output value. These values cannot be OR'd. + * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to + * configure direction and output value. These values + * cannot be OR'd. + * + * @GPIOD_ASIS: Don't change anything + * @GPIOD_IN: Set lines to input mode + * @GPIOD_OUT_LOW: Set lines to output and drive them low + * @GPIOD_OUT_HIGH: Set lines to output and drive them high + * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low + * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high */ enum gpiod_flags { GPIOD_ASIS = 0, -- cgit v1.2.3-71-gd317 From a0386bba70934d42f586eaf68b21d5eeaffa7bd0 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Sun, 23 Jan 2022 18:52:01 +0100 Subject: spi: make remove callback a void function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The value returned by an spi driver's remove function is mostly ignored. (Only an error message is printed if the value is non-zero that the error is ignored.) So change the prototype of the remove function to return no value. This way driver authors are not tempted to assume that passing an error to the upper layer is a good idea. All drivers are adapted accordingly. 
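For a hypothetical driver, the per-driver conversion then amounts to a diff of this shape, matching the new void (*remove)(struct spi_device *spi) prototype in struct spi_driver:

-static int foo_remove(struct spi_device *spi)
+static void foo_remove(struct spi_device *spi)
 {
 	foo_cleanup(spi);
-
-	return 0;
 }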
There is no intended change of behaviour, all callbacks were prepared to return 0 before. Signed-off-by: Uwe Kleine-König Acked-by: Marc Kleine-Budde Acked-by: Andy Shevchenko Reviewed-by: Geert Uytterhoeven Acked-by: Jérôme Pouiller Acked-by: Miquel Raynal Acked-by: Jonathan Cameron Acked-by: Claudius Heine Acked-by: Stefan Schmidt Acked-by: Alexandre Belloni Acked-by: Ulf Hansson # For MMC Acked-by: Marcus Folkesson Acked-by: Łukasz Stelmach Acked-by: Lee Jones Link: https://lore.kernel.org/r/20220123175201.34839-6-u.kleine-koenig@pengutronix.de Signed-off-by: Mark Brown --- drivers/bus/moxtet.c | 4 +--- drivers/char/tpm/st33zp24/spi.c | 4 +--- drivers/char/tpm/tpm_tis_spi_main.c | 3 +-- drivers/clk/clk-lmk04832.c | 4 +--- drivers/gpio/gpio-74x164.c | 4 +--- drivers/gpio/gpio-max3191x.c | 4 +--- drivers/gpio/gpio-max7301.c | 4 +--- drivers/gpio/gpio-mc33880.c | 4 +--- drivers/gpio/gpio-pisosr.c | 4 +--- drivers/gpu/drm/panel/panel-abt-y030xx067a.c | 4 +--- drivers/gpu/drm/panel/panel-ilitek-ili9322.c | 4 +--- drivers/gpu/drm/panel/panel-ilitek-ili9341.c | 3 +-- drivers/gpu/drm/panel/panel-innolux-ej030na.c | 4 +--- drivers/gpu/drm/panel/panel-lg-lb035q02.c | 4 +--- drivers/gpu/drm/panel/panel-lg-lg4573.c | 4 +--- drivers/gpu/drm/panel/panel-nec-nl8048hl11.c | 4 +--- drivers/gpu/drm/panel/panel-novatek-nt39016.c | 4 +--- drivers/gpu/drm/panel/panel-samsung-db7430.c | 3 +-- drivers/gpu/drm/panel/panel-samsung-ld9040.c | 4 +--- drivers/gpu/drm/panel/panel-samsung-s6d27a1.c | 3 +-- drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c | 3 +-- drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 4 +--- drivers/gpu/drm/panel/panel-sony-acx565akm.c | 4 +--- drivers/gpu/drm/panel/panel-tpo-td028ttec1.c | 4 +--- drivers/gpu/drm/panel/panel-tpo-td043mtea1.c | 4 +--- drivers/gpu/drm/panel/panel-tpo-tpg110.c | 3 +-- drivers/gpu/drm/panel/panel-widechips-ws2401.c | 3 +-- drivers/gpu/drm/tiny/hx8357d.c | 4 +--- drivers/gpu/drm/tiny/ili9163.c | 4 +--- drivers/gpu/drm/tiny/ili9225.c | 4 +--- drivers/gpu/drm/tiny/ili9341.c | 4 +--- drivers/gpu/drm/tiny/ili9486.c | 4 +--- drivers/gpu/drm/tiny/mi0283qt.c | 4 +--- drivers/gpu/drm/tiny/repaper.c | 4 +--- drivers/gpu/drm/tiny/st7586.c | 4 +--- drivers/gpu/drm/tiny/st7735r.c | 4 +--- drivers/hwmon/adcxx.c | 4 +--- drivers/hwmon/adt7310.c | 3 +-- drivers/hwmon/max1111.c | 3 +-- drivers/hwmon/max31722.c | 4 +--- drivers/iio/accel/bma400_spi.c | 4 +--- drivers/iio/accel/bmc150-accel-spi.c | 4 +--- drivers/iio/accel/bmi088-accel-spi.c | 4 +--- drivers/iio/accel/kxsd9-spi.c | 4 +--- drivers/iio/accel/mma7455_spi.c | 4 +--- drivers/iio/accel/sca3000.c | 4 +--- drivers/iio/adc/ad7266.c | 4 +--- drivers/iio/adc/ltc2496.c | 4 +--- drivers/iio/adc/mcp320x.c | 4 +--- drivers/iio/adc/mcp3911.c | 4 +--- drivers/iio/adc/ti-adc12138.c | 4 +--- drivers/iio/adc/ti-ads7950.c | 4 +--- drivers/iio/adc/ti-ads8688.c | 4 +--- drivers/iio/adc/ti-tlc4541.c | 4 +--- drivers/iio/amplifiers/ad8366.c | 4 +--- drivers/iio/common/ssp_sensors/ssp_dev.c | 4 +--- drivers/iio/dac/ad5360.c | 4 +--- drivers/iio/dac/ad5380.c | 4 +--- drivers/iio/dac/ad5446.c | 4 +--- drivers/iio/dac/ad5449.c | 4 +--- drivers/iio/dac/ad5504.c | 4 +--- drivers/iio/dac/ad5592r.c | 4 +--- drivers/iio/dac/ad5624r_spi.c | 4 +--- drivers/iio/dac/ad5686-spi.c | 4 +--- drivers/iio/dac/ad5761.c | 4 +--- drivers/iio/dac/ad5764.c | 4 +--- drivers/iio/dac/ad5791.c | 4 +--- drivers/iio/dac/ad8801.c | 4 +--- drivers/iio/dac/ltc1660.c | 4 +--- drivers/iio/dac/ltc2632.c | 4 +--- drivers/iio/dac/mcp4922.c | 4 +--- 
drivers/iio/dac/ti-dac082s085.c | 4 +--- drivers/iio/dac/ti-dac7311.c | 3 +-- drivers/iio/frequency/adf4350.c | 4 +--- drivers/iio/gyro/bmg160_spi.c | 4 +--- drivers/iio/gyro/fxas21002c_spi.c | 4 +--- drivers/iio/health/afe4403.c | 4 +--- drivers/iio/magnetometer/bmc150_magn_spi.c | 4 +--- drivers/iio/magnetometer/hmc5843_spi.c | 4 +--- drivers/iio/potentiometer/max5487.c | 4 +--- drivers/iio/pressure/ms5611_spi.c | 4 +--- drivers/iio/pressure/zpa2326_spi.c | 4 +--- drivers/input/keyboard/applespi.c | 4 +--- drivers/input/misc/adxl34x-spi.c | 4 +--- drivers/input/touchscreen/ads7846.c | 4 +--- drivers/input/touchscreen/cyttsp4_spi.c | 4 +--- drivers/input/touchscreen/tsc2005.c | 4 +--- drivers/leds/leds-cr0014114.c | 4 +--- drivers/leds/leds-dac124s085.c | 4 +--- drivers/leds/leds-el15203000.c | 4 +--- drivers/leds/leds-spi-byte.c | 4 +--- drivers/media/spi/cxd2880-spi.c | 4 +--- drivers/media/spi/gs1662.c | 4 +--- drivers/media/tuners/msi001.c | 3 +-- drivers/mfd/arizona-spi.c | 4 +--- drivers/mfd/da9052-spi.c | 3 +-- drivers/mfd/ezx-pcap.c | 4 +--- drivers/mfd/madera-spi.c | 4 +--- drivers/mfd/mc13xxx-spi.c | 3 +-- drivers/mfd/rsmu_spi.c | 4 +--- drivers/mfd/stmpe-spi.c | 4 +--- drivers/mfd/tps65912-spi.c | 4 +--- drivers/misc/ad525x_dpot-spi.c | 3 +-- drivers/misc/eeprom/eeprom_93xx46.c | 4 +--- drivers/misc/lattice-ecp3-config.c | 4 +--- drivers/misc/lis3lv02d/lis3lv02d_spi.c | 4 +--- drivers/mmc/host/mmc_spi.c | 3 +-- drivers/mtd/devices/mchp23k256.c | 4 +--- drivers/mtd/devices/mchp48l640.c | 4 +--- drivers/mtd/devices/mtd_dataflash.c | 4 +--- drivers/mtd/devices/sst25l.c | 4 +--- drivers/net/can/m_can/tcan4x5x-core.c | 4 +--- drivers/net/can/spi/hi311x.c | 4 +--- drivers/net/can/spi/mcp251x.c | 4 +--- drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 4 +--- drivers/net/dsa/b53/b53_spi.c | 4 +--- drivers/net/dsa/microchip/ksz8795_spi.c | 4 +--- drivers/net/dsa/microchip/ksz9477_spi.c | 4 +--- drivers/net/dsa/sja1105/sja1105_main.c | 6 ++---- drivers/net/dsa/vitesse-vsc73xx-spi.c | 6 ++---- drivers/net/ethernet/asix/ax88796c_main.c | 4 +--- drivers/net/ethernet/micrel/ks8851_spi.c | 4 +--- drivers/net/ethernet/microchip/enc28j60.c | 4 +--- drivers/net/ethernet/microchip/encx24j600.c | 4 +--- drivers/net/ethernet/qualcomm/qca_spi.c | 4 +--- drivers/net/ethernet/vertexcom/mse102x.c | 4 +--- drivers/net/ethernet/wiznet/w5100-spi.c | 4 +--- drivers/net/ieee802154/adf7242.c | 4 +--- drivers/net/ieee802154/at86rf230.c | 4 +--- drivers/net/ieee802154/ca8210.c | 6 ++---- drivers/net/ieee802154/cc2520.c | 4 +--- drivers/net/ieee802154/mcr20a.c | 4 +--- drivers/net/ieee802154/mrf24j40.c | 4 +--- drivers/net/phy/spi_ks8995.c | 4 +--- drivers/net/wan/slic_ds26522.c | 3 +-- drivers/net/wireless/intersil/p54/p54spi.c | 4 +--- drivers/net/wireless/marvell/libertas/if_spi.c | 4 +--- drivers/net/wireless/microchip/wilc1000/spi.c | 4 +--- drivers/net/wireless/st/cw1200/cw1200_spi.c | 4 +--- drivers/net/wireless/ti/wl1251/spi.c | 4 +--- drivers/net/wireless/ti/wlcore/spi.c | 4 +--- drivers/nfc/nfcmrvl/spi.c | 3 +-- drivers/nfc/st-nci/spi.c | 4 +--- drivers/nfc/st95hf/core.c | 4 +--- drivers/nfc/trf7970a.c | 4 +--- drivers/platform/chrome/cros_ec_spi.c | 4 +--- drivers/platform/olpc/olpc-xo175-ec.c | 4 +--- drivers/rtc/rtc-ds1302.c | 3 +-- drivers/rtc/rtc-ds1305.c | 4 +--- drivers/rtc/rtc-ds1343.c | 4 +--- drivers/spi/spi-mem.c | 6 ++---- drivers/spi/spi-slave-system-control.c | 3 +-- drivers/spi/spi-slave-time.c | 3 +-- drivers/spi/spi-tle62x0.c | 3 +-- drivers/spi/spi.c | 11 ++--------- 
drivers/spi/spidev.c | 4 +--- drivers/staging/fbtft/fbtft.h | 3 +-- drivers/staging/pi433/pi433_if.c | 4 +--- drivers/staging/wfx/bus_spi.c | 3 +-- drivers/tty/serial/max3100.c | 5 ++--- drivers/tty/serial/max310x.c | 3 +-- drivers/tty/serial/sc16is7xx.c | 4 +--- drivers/usb/gadget/udc/max3420_udc.c | 4 +--- drivers/usb/host/max3421-hcd.c | 3 +-- drivers/video/backlight/ams369fg06.c | 3 +-- drivers/video/backlight/corgi_lcd.c | 3 +-- drivers/video/backlight/ili922x.c | 3 +-- drivers/video/backlight/l4f00242t03.c | 3 +-- drivers/video/backlight/lms501kf03.c | 3 +-- drivers/video/backlight/ltv350qv.c | 3 +-- drivers/video/backlight/tdo24m.c | 3 +-- drivers/video/backlight/tosa_lcd.c | 4 +--- drivers/video/backlight/vgg2432a4.c | 4 +--- drivers/video/fbdev/omap/lcd_mipid.c | 4 +--- .../fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c | 4 +--- .../video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c | 4 +--- .../video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c | 4 +--- .../video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c | 4 +--- .../video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c | 4 +--- include/linux/spi/spi.h | 2 +- sound/pci/hda/cs35l41_hda_spi.c | 4 +--- sound/soc/codecs/adau1761-spi.c | 3 +-- sound/soc/codecs/adau1781-spi.c | 3 +-- sound/soc/codecs/cs35l41-spi.c | 4 +--- sound/soc/codecs/pcm3168a-spi.c | 4 +--- sound/soc/codecs/pcm512x-spi.c | 3 +-- sound/soc/codecs/tlv320aic32x4-spi.c | 4 +--- sound/soc/codecs/tlv320aic3x-spi.c | 4 +--- sound/soc/codecs/wm0010.c | 4 +--- sound/soc/codecs/wm8804-spi.c | 3 +-- sound/spi/at73c213.c | 4 +--- 191 files changed, 197 insertions(+), 545 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index fd87a59837fa..5eb0fe73ddc4 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -815,7 +815,7 @@ static int moxtet_probe(struct spi_device *spi) return 0; } -static int moxtet_remove(struct spi_device *spi) +static void moxtet_remove(struct spi_device *spi) { struct moxtet *moxtet = spi_get_drvdata(spi); @@ -828,8 +828,6 @@ static int moxtet_remove(struct spi_device *spi) device_for_each_child(moxtet->dev, NULL, __unregister); mutex_destroy(&moxtet->lock); - - return 0; } static const struct of_device_id moxtet_dt_ids[] = { diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c index ccd9e42b8eab..22d184884694 100644 --- a/drivers/char/tpm/st33zp24/spi.c +++ b/drivers/char/tpm/st33zp24/spi.c @@ -381,13 +381,11 @@ static int st33zp24_spi_probe(struct spi_device *dev) * @param: client, the spi_device description (TPM SPI description). * @return: 0 in case of success. 
*/ -static int st33zp24_spi_remove(struct spi_device *dev) +static void st33zp24_spi_remove(struct spi_device *dev) { struct tpm_chip *chip = spi_get_drvdata(dev); st33zp24_remove(chip); - - return 0; } static const struct spi_device_id st33zp24_spi_id[] = { diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index aaa59a00eeae..184396b3af50 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -254,13 +254,12 @@ static int tpm_tis_spi_driver_probe(struct spi_device *spi) static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume); -static int tpm_tis_spi_remove(struct spi_device *dev) +static void tpm_tis_spi_remove(struct spi_device *dev) { struct tpm_chip *chip = spi_get_drvdata(dev); tpm_chip_unregister(chip); tpm_tis_remove(chip); - return 0; } static const struct spi_device_id tpm_tis_spi_id[] = { diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c index 8f02c0b88000..f416f8bc2898 100644 --- a/drivers/clk/clk-lmk04832.c +++ b/drivers/clk/clk-lmk04832.c @@ -1544,14 +1544,12 @@ err_disable_oscin: return ret; } -static int lmk04832_remove(struct spi_device *spi) +static void lmk04832_remove(struct spi_device *spi) { struct lmk04832 *lmk = spi_get_drvdata(spi); clk_disable_unprepare(lmk->oscin); of_clk_del_provider(spi->dev.of_node); - - return 0; } static const struct spi_device_id lmk04832_id[] = { { "lmk04832", LMK04832 }, diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c index 4a55cdf089d6..e00c33310517 100644 --- a/drivers/gpio/gpio-74x164.c +++ b/drivers/gpio/gpio-74x164.c @@ -163,15 +163,13 @@ exit_destroy: return ret; } -static int gen_74x164_remove(struct spi_device *spi) +static void gen_74x164_remove(struct spi_device *spi) { struct gen_74x164_chip *chip = spi_get_drvdata(spi); gpiod_set_value_cansleep(chip->gpiod_oe, 0); gpiochip_remove(&chip->gpio_chip); mutex_destroy(&chip->lock); - - return 0; } static const struct spi_device_id gen_74x164_spi_ids[] = { diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c index 51cd6f98d1c7..161c4751c5f7 100644 --- a/drivers/gpio/gpio-max3191x.c +++ b/drivers/gpio/gpio-max3191x.c @@ -443,14 +443,12 @@ static int max3191x_probe(struct spi_device *spi) return 0; } -static int max3191x_remove(struct spi_device *spi) +static void max3191x_remove(struct spi_device *spi) { struct max3191x_chip *max3191x = spi_get_drvdata(spi); gpiochip_remove(&max3191x->gpio); mutex_destroy(&max3191x->lock); - - return 0; } static int __init max3191x_register_driver(struct spi_driver *sdrv) diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c index 5862d73bf325..11813f41d460 100644 --- a/drivers/gpio/gpio-max7301.c +++ b/drivers/gpio/gpio-max7301.c @@ -64,11 +64,9 @@ static int max7301_probe(struct spi_device *spi) return ret; } -static int max7301_remove(struct spi_device *spi) +static void max7301_remove(struct spi_device *spi) { __max730x_remove(&spi->dev); - - return 0; } static const struct spi_device_id max7301_id[] = { diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c index 31d2be1bebc8..cd9b16dbe1a9 100644 --- a/drivers/gpio/gpio-mc33880.c +++ b/drivers/gpio/gpio-mc33880.c @@ -134,7 +134,7 @@ exit_destroy: return ret; } -static int mc33880_remove(struct spi_device *spi) +static void mc33880_remove(struct spi_device *spi) { struct mc33880 *mc; @@ -142,8 +142,6 @@ static int mc33880_remove(struct spi_device *spi) gpiochip_remove(&mc->chip); mutex_destroy(&mc->lock); - - return 
0; } static struct spi_driver mc33880_driver = { diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c index 8e04054cf07e..81a47ae09ff8 100644 --- a/drivers/gpio/gpio-pisosr.c +++ b/drivers/gpio/gpio-pisosr.c @@ -163,15 +163,13 @@ static int pisosr_gpio_probe(struct spi_device *spi) return 0; } -static int pisosr_gpio_remove(struct spi_device *spi) +static void pisosr_gpio_remove(struct spi_device *spi) { struct pisosr_gpio *gpio = spi_get_drvdata(spi); gpiochip_remove(&gpio->chip); mutex_destroy(&gpio->lock); - - return 0; } static const struct spi_device_id pisosr_gpio_id_table[] = { diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c index f043b484055b..ed626fdc08e8 100644 --- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c +++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c @@ -293,15 +293,13 @@ static int y030xx067a_probe(struct spi_device *spi) return 0; } -static int y030xx067a_remove(struct spi_device *spi) +static void y030xx067a_remove(struct spi_device *spi) { struct y030xx067a *priv = spi_get_drvdata(spi); drm_panel_remove(&priv->panel); drm_panel_disable(&priv->panel); drm_panel_unprepare(&priv->panel); - - return 0; } static const struct drm_display_mode y030xx067a_modes[] = { diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 8e84df9a0033..3dfafa585127 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -896,14 +896,12 @@ static int ili9322_probe(struct spi_device *spi) return 0; } -static int ili9322_remove(struct spi_device *spi) +static void ili9322_remove(struct spi_device *spi) { struct ili9322 *ili = spi_get_drvdata(spi); ili9322_power_off(ili); drm_panel_remove(&ili->panel); - - return 0; } /* diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 2c3378a259b1..a07ef26234e5 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -728,7 +728,7 @@ static int ili9341_probe(struct spi_device *spi) return -1; } -static int ili9341_remove(struct spi_device *spi) +static void ili9341_remove(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); struct ili9341 *ili = spi_get_drvdata(spi); @@ -741,7 +741,6 @@ static int ili9341_remove(struct spi_device *spi) drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); } - return 0; } static void ili9341_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c index c558de3f99be..e3b1daa0cb72 100644 --- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c +++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c @@ -219,15 +219,13 @@ static int ej030na_probe(struct spi_device *spi) return 0; } -static int ej030na_remove(struct spi_device *spi) +static void ej030na_remove(struct spi_device *spi) { struct ej030na *priv = spi_get_drvdata(spi); drm_panel_remove(&priv->panel); drm_panel_disable(&priv->panel); drm_panel_unprepare(&priv->panel); - - return 0; } static const struct drm_display_mode ej030na_modes[] = { diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c index f3183b68704f..9d0d4faa3f58 100644 --- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c +++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c @@ -203,14 +203,12 @@ static int lb035q02_probe(struct spi_device *spi) return 0; } -static int 
lb035q02_remove(struct spi_device *spi) +static void lb035q02_remove(struct spi_device *spi) { struct lb035q02_device *lcd = spi_get_drvdata(spi); drm_panel_remove(&lcd->panel); drm_panel_disable(&lcd->panel); - - return 0; } static const struct of_device_id lb035q02_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c index 8e5160af1de5..cf246d15b7b6 100644 --- a/drivers/gpu/drm/panel/panel-lg-lg4573.c +++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c @@ -266,14 +266,12 @@ static int lg4573_probe(struct spi_device *spi) return 0; } -static int lg4573_remove(struct spi_device *spi) +static void lg4573_remove(struct spi_device *spi) { struct lg4573 *ctx = spi_get_drvdata(spi); lg4573_display_off(ctx); drm_panel_remove(&ctx->panel); - - return 0; } static const struct of_device_id lg4573_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c index 6e5ab1debc8b..81c5c541a351 100644 --- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c @@ -212,15 +212,13 @@ static int nl8048_probe(struct spi_device *spi) return 0; } -static int nl8048_remove(struct spi_device *spi) +static void nl8048_remove(struct spi_device *spi) { struct nl8048_panel *lcd = spi_get_drvdata(spi); drm_panel_remove(&lcd->panel); drm_panel_disable(&lcd->panel); drm_panel_unprepare(&lcd->panel); - - return 0; } static const struct of_device_id nl8048_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c index d036853db865..f58cfb10b58a 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c @@ -292,7 +292,7 @@ static int nt39016_probe(struct spi_device *spi) return 0; } -static int nt39016_remove(struct spi_device *spi) +static void nt39016_remove(struct spi_device *spi) { struct nt39016 *panel = spi_get_drvdata(spi); @@ -300,8 +300,6 @@ static int nt39016_remove(struct spi_device *spi) nt39016_disable(&panel->drm_panel); nt39016_unprepare(&panel->drm_panel); - - return 0; } static const struct drm_display_mode kd035g6_display_modes[] = { diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c index ead479719f00..04640c5256a8 100644 --- a/drivers/gpu/drm/panel/panel-samsung-db7430.c +++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c @@ -314,12 +314,11 @@ static int db7430_probe(struct spi_device *spi) return 0; } -static int db7430_remove(struct spi_device *spi) +static void db7430_remove(struct spi_device *spi) { struct db7430 *db = spi_get_drvdata(spi); drm_panel_remove(&db->panel); - return 0; } /* diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c index c4b388850a13..01eb211f32f7 100644 --- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c +++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c @@ -358,14 +358,12 @@ static int ld9040_probe(struct spi_device *spi) return 0; } -static int ld9040_remove(struct spi_device *spi) +static void ld9040_remove(struct spi_device *spi) { struct ld9040 *ctx = spi_get_drvdata(spi); ld9040_power_off(ctx); drm_panel_remove(&ctx->panel); - - return 0; } static const struct of_device_id ld9040_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c index 1696ceb36aa0..2adb223a895c 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c +++ 
b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c @@ -291,12 +291,11 @@ static int s6d27a1_probe(struct spi_device *spi) return 0; } -static int s6d27a1_remove(struct spi_device *spi) +static void s6d27a1_remove(struct spi_device *spi) { struct s6d27a1 *ctx = spi_get_drvdata(spi); drm_panel_remove(&ctx->panel); - return 0; } static const struct of_device_id s6d27a1_match[] = { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c index c178d962b0d5..d99afcc672ca 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c @@ -62,10 +62,9 @@ static int s6e63m0_spi_probe(struct spi_device *spi) s6e63m0_spi_dcs_write, false); } -static int s6e63m0_spi_remove(struct spi_device *spi) +static void s6e63m0_spi_remove(struct spi_device *spi) { s6e63m0_remove(&spi->dev); - return 0; } static const struct of_device_id s6e63m0_spi_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 61e565524542..bbc4569cbcdc 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -387,13 +387,11 @@ static int st7789v_probe(struct spi_device *spi) return 0; } -static int st7789v_remove(struct spi_device *spi) +static void st7789v_remove(struct spi_device *spi) { struct st7789v *ctx = spi_get_drvdata(spi); drm_panel_remove(&ctx->panel); - - return 0; } static const struct of_device_id st7789v_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index ba0b3ead150f..0d7541a33f87 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -655,7 +655,7 @@ static int acx565akm_probe(struct spi_device *spi) return 0; } -static int acx565akm_remove(struct spi_device *spi) +static void acx565akm_remove(struct spi_device *spi) { struct acx565akm_panel *lcd = spi_get_drvdata(spi); @@ -666,8 +666,6 @@ static int acx565akm_remove(struct spi_device *spi) drm_panel_disable(&lcd->panel); drm_panel_unprepare(&lcd->panel); - - return 0; } static const struct of_device_id acx565akm_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c index ba0c00d1a001..4dbf8b88f264 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c @@ -350,15 +350,13 @@ static int td028ttec1_probe(struct spi_device *spi) return 0; } -static int td028ttec1_remove(struct spi_device *spi) +static void td028ttec1_remove(struct spi_device *spi) { struct td028ttec1_panel *lcd = spi_get_drvdata(spi); drm_panel_remove(&lcd->panel); drm_panel_disable(&lcd->panel); drm_panel_unprepare(&lcd->panel); - - return 0; } static const struct of_device_id td028ttec1_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c index 1866cdb8f9c1..cf4609bb9b1d 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c @@ -463,7 +463,7 @@ static int td043mtea1_probe(struct spi_device *spi) return 0; } -static int td043mtea1_remove(struct spi_device *spi) +static void td043mtea1_remove(struct spi_device *spi) { struct td043mtea1_panel *lcd = spi_get_drvdata(spi); @@ -472,8 +472,6 @@ static int td043mtea1_remove(struct spi_device *spi) drm_panel_unprepare(&lcd->panel); 
sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group); - - return 0; } static const struct of_device_id td043mtea1_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c index e3791dad6830..0b1f5a11a055 100644 --- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c +++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c @@ -450,12 +450,11 @@ static int tpg110_probe(struct spi_device *spi) return 0; } -static int tpg110_remove(struct spi_device *spi) +static void tpg110_remove(struct spi_device *spi) { struct tpg110 *tpg = spi_get_drvdata(spi); drm_panel_remove(&tpg->panel); - return 0; } static const struct of_device_id tpg110_match[] = { diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c index 8bc976f54b80..236f3cb2b594 100644 --- a/drivers/gpu/drm/panel/panel-widechips-ws2401.c +++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c @@ -407,12 +407,11 @@ static int ws2401_probe(struct spi_device *spi) return 0; } -static int ws2401_remove(struct spi_device *spi) +static void ws2401_remove(struct spi_device *spi) { struct ws2401 *ws = spi_get_drvdata(spi); drm_panel_remove(&ws->panel); - return 0; } /* diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index 9b33c05732aa..ebb025543f8d 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -263,14 +263,12 @@ static int hx8357d_probe(struct spi_device *spi) return 0; } -static int hx8357d_remove(struct spi_device *spi) +static void hx8357d_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void hx8357d_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c index bcc181351236..fc8ed245b0bc 100644 --- a/drivers/gpu/drm/tiny/ili9163.c +++ b/drivers/gpu/drm/tiny/ili9163.c @@ -193,14 +193,12 @@ static int ili9163_probe(struct spi_device *spi) return 0; } -static int ili9163_remove(struct spi_device *spi) +static void ili9163_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void ili9163_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 976d3209f164..cc92eb9f2a07 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -411,14 +411,12 @@ static int ili9225_probe(struct spi_device *spi) return 0; } -static int ili9225_remove(struct spi_device *spi) +static void ili9225_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void ili9225_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index 37e0c33399c8..5b8cc770ee7b 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -225,14 +225,12 @@ static int ili9341_probe(struct spi_device *spi) return 0; } -static int ili9341_remove(struct spi_device *spi) +static void ili9341_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void ili9341_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index e9a63f4b2993..6d655e18e0aa 100644 --- 
a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -243,14 +243,12 @@ static int ili9486_probe(struct spi_device *spi) return 0; } -static int ili9486_remove(struct spi_device *spi) +static void ili9486_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void ili9486_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index 023de49e7a8e..5e060f6910bb 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -233,14 +233,12 @@ static int mi0283qt_probe(struct spi_device *spi) return 0; } -static int mi0283qt_remove(struct spi_device *spi) +static void mi0283qt_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void mi0283qt_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 97a775c48cea..beeeb170d0b1 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -1140,14 +1140,12 @@ static int repaper_probe(struct spi_device *spi) return 0; } -static int repaper_remove(struct spi_device *spi) +static void repaper_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void repaper_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index 51b9b9fb3ead..3f38faa1cd8c 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -360,14 +360,12 @@ static int st7586_probe(struct spi_device *spi) return 0; } -static int st7586_remove(struct spi_device *spi) +static void st7586_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void st7586_shutdown(struct spi_device *spi) diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index fc40dd10efa8..29d618093e94 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -247,14 +247,12 @@ static int st7735r_probe(struct spi_device *spi) return 0; } -static int st7735r_remove(struct spi_device *spi) +static void st7735r_remove(struct spi_device *spi) { struct drm_device *drm = spi_get_drvdata(spi); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void st7735r_shutdown(struct spi_device *spi) diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c index e5bc5ce09f4e..de37bce24fa6 100644 --- a/drivers/hwmon/adcxx.c +++ b/drivers/hwmon/adcxx.c @@ -194,7 +194,7 @@ out_err: return status; } -static int adcxx_remove(struct spi_device *spi) +static void adcxx_remove(struct spi_device *spi) { struct adcxx *adc = spi_get_drvdata(spi); int i; @@ -205,8 +205,6 @@ static int adcxx_remove(struct spi_device *spi) device_remove_file(&spi->dev, &ad_input[i].dev_attr); mutex_unlock(&adc->lock); - - return 0; } static const struct spi_device_id adcxx_ids[] = { diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c index c40cac16af68..832d9ec64934 100644 --- a/drivers/hwmon/adt7310.c +++ b/drivers/hwmon/adt7310.c @@ -88,10 +88,9 @@ static int adt7310_spi_probe(struct spi_device *spi) &adt7310_spi_ops); } -static int adt7310_spi_remove(struct spi_device *spi) +static void adt7310_spi_remove(struct spi_device 
*spi) { adt7x10_remove(&spi->dev, spi->irq); - return 0; } static const struct spi_device_id adt7310_id[] = { diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c index 5fcfd57df61e..4c5487aeb3cf 100644 --- a/drivers/hwmon/max1111.c +++ b/drivers/hwmon/max1111.c @@ -254,7 +254,7 @@ err_remove: return err; } -static int max1111_remove(struct spi_device *spi) +static void max1111_remove(struct spi_device *spi) { struct max1111_data *data = spi_get_drvdata(spi); @@ -265,7 +265,6 @@ static int max1111_remove(struct spi_device *spi) sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group); sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); mutex_destroy(&data->drvdata_lock); - return 0; } static const struct spi_device_id max1111_ids[] = { diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c index 4cf4fe6809a3..93e048ee4955 100644 --- a/drivers/hwmon/max31722.c +++ b/drivers/hwmon/max31722.c @@ -100,7 +100,7 @@ static int max31722_probe(struct spi_device *spi) return 0; } -static int max31722_remove(struct spi_device *spi) +static void max31722_remove(struct spi_device *spi) { struct max31722_data *data = spi_get_drvdata(spi); int ret; @@ -111,8 +111,6 @@ static int max31722_remove(struct spi_device *spi) if (ret) /* There is nothing we can do about this ... */ dev_warn(&spi->dev, "Failed to put device in stand-by mode\n"); - - return 0; } static int __maybe_unused max31722_suspend(struct device *dev) diff --git a/drivers/iio/accel/bma400_spi.c b/drivers/iio/accel/bma400_spi.c index 9f622e37477b..9040a717b247 100644 --- a/drivers/iio/accel/bma400_spi.c +++ b/drivers/iio/accel/bma400_spi.c @@ -87,11 +87,9 @@ static int bma400_spi_probe(struct spi_device *spi) return bma400_probe(&spi->dev, regmap, id->name); } -static int bma400_spi_remove(struct spi_device *spi) +static void bma400_spi_remove(struct spi_device *spi) { bma400_remove(&spi->dev); - - return 0; } static const struct spi_device_id bma400_spi_ids[] = { diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c index 11559567cb39..80007cc2d044 100644 --- a/drivers/iio/accel/bmc150-accel-spi.c +++ b/drivers/iio/accel/bmc150-accel-spi.c @@ -35,11 +35,9 @@ static int bmc150_accel_probe(struct spi_device *spi) true); } -static int bmc150_accel_remove(struct spi_device *spi) +static void bmc150_accel_remove(struct spi_device *spi) { bmc150_accel_core_remove(&spi->dev); - - return 0; } static const struct acpi_device_id bmc150_accel_acpi_match[] = { diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c index 758ad2f12896..06d99d9949f3 100644 --- a/drivers/iio/accel/bmi088-accel-spi.c +++ b/drivers/iio/accel/bmi088-accel-spi.c @@ -56,11 +56,9 @@ static int bmi088_accel_probe(struct spi_device *spi) true); } -static int bmi088_accel_remove(struct spi_device *spi) +static void bmi088_accel_remove(struct spi_device *spi) { bmi088_accel_core_remove(&spi->dev); - - return 0; } static const struct spi_device_id bmi088_accel_id[] = { diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c index 441e6b764281..57c451cfb9e5 100644 --- a/drivers/iio/accel/kxsd9-spi.c +++ b/drivers/iio/accel/kxsd9-spi.c @@ -32,11 +32,9 @@ static int kxsd9_spi_probe(struct spi_device *spi) spi_get_device_id(spi)->name); } -static int kxsd9_spi_remove(struct spi_device *spi) +static void kxsd9_spi_remove(struct spi_device *spi) { kxsd9_common_remove(&spi->dev); - - return 0; } static const struct spi_device_id kxsd9_spi_id[] = { diff --git 
a/drivers/iio/accel/mma7455_spi.c b/drivers/iio/accel/mma7455_spi.c index ecf690692dcc..b746031551a3 100644 --- a/drivers/iio/accel/mma7455_spi.c +++ b/drivers/iio/accel/mma7455_spi.c @@ -22,11 +22,9 @@ static int mma7455_spi_probe(struct spi_device *spi) return mma7455_core_probe(&spi->dev, regmap, id->name); } -static int mma7455_spi_remove(struct spi_device *spi) +static void mma7455_spi_remove(struct spi_device *spi) { mma7455_core_remove(&spi->dev); - - return 0; } static const struct spi_device_id mma7455_spi_ids[] = { diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index 43ecacbdc95a..83c81072511e 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -1524,7 +1524,7 @@ error_ret: return ret; } -static int sca3000_remove(struct spi_device *spi) +static void sca3000_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct sca3000_state *st = iio_priv(indio_dev); @@ -1535,8 +1535,6 @@ static int sca3000_remove(struct spi_device *spi) sca3000_stop_all_interrupts(st); if (spi->irq) free_irq(spi->irq, indio_dev); - - return 0; } static const struct spi_device_id sca3000_id[] = { diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 1d345d66742d..c17d9b5fbaf6 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -479,7 +479,7 @@ error_disable_reg: return ret; } -static int ad7266_remove(struct spi_device *spi) +static void ad7266_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad7266_state *st = iio_priv(indio_dev); @@ -488,8 +488,6 @@ static int ad7266_remove(struct spi_device *spi) iio_triggered_buffer_cleanup(indio_dev); if (!IS_ERR(st->reg)) regulator_disable(st->reg); - - return 0; } static const struct spi_device_id ad7266_id[] = { diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c index dd956a7c216e..5a55f79f2574 100644 --- a/drivers/iio/adc/ltc2496.c +++ b/drivers/iio/adc/ltc2496.c @@ -78,13 +78,11 @@ static int ltc2496_probe(struct spi_device *spi) return ltc2497core_probe(dev, indio_dev); } -static int ltc2496_remove(struct spi_device *spi) +static void ltc2496_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); ltc2497core_remove(indio_dev); - - return 0; } static const struct of_device_id ltc2496_of_match[] = { diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index 8d1cff28cae0..b4c69acb33e3 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -459,15 +459,13 @@ reg_disable: return ret; } -static int mcp320x_remove(struct spi_device *spi) +static void mcp320x_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct mcp320x *adc = iio_priv(indio_dev); iio_device_unregister(indio_dev); regulator_disable(adc->reg); - - return 0; } static const struct of_device_id mcp320x_dt_ids[] = { diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c index 13535f148c4c..1cb4590fe412 100644 --- a/drivers/iio/adc/mcp3911.c +++ b/drivers/iio/adc/mcp3911.c @@ -321,7 +321,7 @@ reg_disable: return ret; } -static int mcp3911_remove(struct spi_device *spi) +static void mcp3911_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct mcp3911 *adc = iio_priv(indio_dev); @@ -331,8 +331,6 @@ static int mcp3911_remove(struct spi_device *spi) clk_disable_unprepare(adc->clki); if (adc->vref) regulator_disable(adc->vref); - - return 0; } static const struct of_device_id mcp3911_dt_ids[] = { 
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c index 6eb62b564dae..59d75d09604f 100644 --- a/drivers/iio/adc/ti-adc12138.c +++ b/drivers/iio/adc/ti-adc12138.c @@ -503,7 +503,7 @@ err_clk_disable: return ret; } -static int adc12138_remove(struct spi_device *spi) +static void adc12138_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct adc12138 *adc = iio_priv(indio_dev); @@ -514,8 +514,6 @@ static int adc12138_remove(struct spi_device *spi) regulator_disable(adc->vref_n); regulator_disable(adc->vref_p); clk_disable_unprepare(adc->cclk); - - return 0; } static const struct of_device_id adc12138_dt_ids[] = { diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c index a7efa3eada2c..e3658b969c5b 100644 --- a/drivers/iio/adc/ti-ads7950.c +++ b/drivers/iio/adc/ti-ads7950.c @@ -662,7 +662,7 @@ error_destroy_mutex: return ret; } -static int ti_ads7950_remove(struct spi_device *spi) +static void ti_ads7950_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ti_ads7950_state *st = iio_priv(indio_dev); @@ -672,8 +672,6 @@ static int ti_ads7950_remove(struct spi_device *spi) iio_triggered_buffer_cleanup(indio_dev); regulator_disable(st->reg); mutex_destroy(&st->slock); - - return 0; } static const struct spi_device_id ti_ads7950_id[] = { diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 2e24717d7f55..22c2583eedd0 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ -479,7 +479,7 @@ err_regulator_disable: return ret; } -static int ads8688_remove(struct spi_device *spi) +static void ads8688_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ads8688_state *st = iio_priv(indio_dev); @@ -489,8 +489,6 @@ static int ads8688_remove(struct spi_device *spi) if (!IS_ERR(st->reg)) regulator_disable(st->reg); - - return 0; } static const struct spi_device_id ads8688_id[] = { diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c index 403b787f9f7e..2406eda9dfc6 100644 --- a/drivers/iio/adc/ti-tlc4541.c +++ b/drivers/iio/adc/ti-tlc4541.c @@ -224,7 +224,7 @@ error_disable_reg: return ret; } -static int tlc4541_remove(struct spi_device *spi) +static void tlc4541_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct tlc4541_state *st = iio_priv(indio_dev); @@ -232,8 +232,6 @@ static int tlc4541_remove(struct spi_device *spi) iio_device_unregister(indio_dev); iio_triggered_buffer_cleanup(indio_dev); regulator_disable(st->reg); - - return 0; } static const struct of_device_id tlc4541_dt_ids[] = { diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c index cfcf18a0bce8..1134ae12e531 100644 --- a/drivers/iio/amplifiers/ad8366.c +++ b/drivers/iio/amplifiers/ad8366.c @@ -298,7 +298,7 @@ error_disable_reg: return ret; } -static int ad8366_remove(struct spi_device *spi) +static void ad8366_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad8366_state *st = iio_priv(indio_dev); @@ -308,8 +308,6 @@ static int ad8366_remove(struct spi_device *spi) if (!IS_ERR(reg)) regulator_disable(reg); - - return 0; } static const struct spi_device_id ad8366_id[] = { diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c index 1aee87100038..eafaf4529df5 100644 --- a/drivers/iio/common/ssp_sensors/ssp_dev.c +++ b/drivers/iio/common/ssp_sensors/ssp_dev.c @@ -586,7 +586,7 
@@ err_setup_irq: return ret; } -static int ssp_remove(struct spi_device *spi) +static void ssp_remove(struct spi_device *spi) { struct ssp_data *data = spi_get_drvdata(spi); @@ -608,8 +608,6 @@ static int ssp_remove(struct spi_device *spi) mutex_destroy(&data->pending_lock); mfd_remove_devices(&spi->dev); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c index 2d3b14c407d8..ecbc6a51d60f 100644 --- a/drivers/iio/dac/ad5360.c +++ b/drivers/iio/dac/ad5360.c @@ -521,7 +521,7 @@ error_free_channels: return ret; } -static int ad5360_remove(struct spi_device *spi) +static void ad5360_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5360_state *st = iio_priv(indio_dev); @@ -531,8 +531,6 @@ static int ad5360_remove(struct spi_device *spi) kfree(indio_dev->channels); regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg); - - return 0; } static const struct spi_device_id ad5360_ids[] = { diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c index e38860a6a9f3..82e1d9bd773e 100644 --- a/drivers/iio/dac/ad5380.c +++ b/drivers/iio/dac/ad5380.c @@ -488,11 +488,9 @@ static int ad5380_spi_probe(struct spi_device *spi) return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name); } -static int ad5380_spi_remove(struct spi_device *spi) +static void ad5380_spi_remove(struct spi_device *spi) { ad5380_remove(&spi->dev); - - return 0; } static const struct spi_device_id ad5380_spi_ids[] = { diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index 1c9b54c012a7..14cfabacbea5 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -491,11 +491,9 @@ static int ad5446_spi_probe(struct spi_device *spi) &ad5446_spi_chip_info[id->driver_data]); } -static int ad5446_spi_remove(struct spi_device *spi) +static void ad5446_spi_remove(struct spi_device *spi) { ad5446_remove(&spi->dev); - - return 0; } static struct spi_driver ad5446_spi_driver = { diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c index f5e93c6acc9d..bad9bdaafa94 100644 --- a/drivers/iio/dac/ad5449.c +++ b/drivers/iio/dac/ad5449.c @@ -330,7 +330,7 @@ error_disable_reg: return ret; } -static int ad5449_spi_remove(struct spi_device *spi) +static void ad5449_spi_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5449 *st = iio_priv(indio_dev); @@ -338,8 +338,6 @@ static int ad5449_spi_remove(struct spi_device *spi) iio_device_unregister(indio_dev); regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg); - - return 0; } static const struct spi_device_id ad5449_spi_ids[] = { diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c index b631261efa97..8507573aa13e 100644 --- a/drivers/iio/dac/ad5504.c +++ b/drivers/iio/dac/ad5504.c @@ -336,7 +336,7 @@ error_disable_reg: return ret; } -static int ad5504_remove(struct spi_device *spi) +static void ad5504_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5504_state *st = iio_priv(indio_dev); @@ -345,8 +345,6 @@ static int ad5504_remove(struct spi_device *spi) if (!IS_ERR(st->reg)) regulator_disable(st->reg); - - return 0; } static const struct spi_device_id ad5504_id[] = { diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c index 6bfd7951e18c..0f7abfa75bec 100644 --- a/drivers/iio/dac/ad5592r.c +++ b/drivers/iio/dac/ad5592r.c @@ -130,11 +130,9 @@ static int ad5592r_spi_probe(struct spi_device *spi) return ad5592r_probe(&spi->dev, 
id->name, &ad5592r_rw_ops); } -static int ad5592r_spi_remove(struct spi_device *spi) +static void ad5592r_spi_remove(struct spi_device *spi) { ad5592r_remove(&spi->dev); - - return 0; } static const struct spi_device_id ad5592r_spi_ids[] = { diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 3c98941b9f99..371e812850eb 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -293,7 +293,7 @@ error_disable_reg: return ret; } -static int ad5624r_remove(struct spi_device *spi) +static void ad5624r_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5624r_state *st = iio_priv(indio_dev); @@ -301,8 +301,6 @@ static int ad5624r_remove(struct spi_device *spi) iio_device_unregister(indio_dev); if (!IS_ERR(st->reg)) regulator_disable(st->reg); - - return 0; } static const struct spi_device_id ad5624r_id[] = { diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c index 2628810fdbb1..d26fb29b6b04 100644 --- a/drivers/iio/dac/ad5686-spi.c +++ b/drivers/iio/dac/ad5686-spi.c @@ -95,11 +95,9 @@ static int ad5686_spi_probe(struct spi_device *spi) ad5686_spi_write, ad5686_spi_read); } -static int ad5686_spi_remove(struct spi_device *spi) +static void ad5686_spi_remove(struct spi_device *spi) { ad5686_remove(&spi->dev); - - return 0; } static const struct spi_device_id ad5686_spi_id[] = { diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c index e37e095e94fc..4cb8471db81e 100644 --- a/drivers/iio/dac/ad5761.c +++ b/drivers/iio/dac/ad5761.c @@ -394,7 +394,7 @@ disable_regulator_err: return ret; } -static int ad5761_remove(struct spi_device *spi) +static void ad5761_remove(struct spi_device *spi) { struct iio_dev *iio_dev = spi_get_drvdata(spi); struct ad5761_state *st = iio_priv(iio_dev); @@ -403,8 +403,6 @@ static int ad5761_remove(struct spi_device *spi) if (!IS_ERR_OR_NULL(st->vref_reg)) regulator_disable(st->vref_reg); - - return 0; } static const struct spi_device_id ad5761_id[] = { diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c index ae089b9145cb..d235a8047ba0 100644 --- a/drivers/iio/dac/ad5764.c +++ b/drivers/iio/dac/ad5764.c @@ -332,7 +332,7 @@ error_disable_reg: return ret; } -static int ad5764_remove(struct spi_device *spi) +static void ad5764_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5764_state *st = iio_priv(indio_dev); @@ -341,8 +341,6 @@ static int ad5764_remove(struct spi_device *spi) if (st->chip_info->int_vref == 0) regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg); - - return 0; } static const struct spi_device_id ad5764_ids[] = { diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c index 7b4579d73d18..2b14914b4050 100644 --- a/drivers/iio/dac/ad5791.c +++ b/drivers/iio/dac/ad5791.c @@ -428,7 +428,7 @@ error_disable_reg_pos: return ret; } -static int ad5791_remove(struct spi_device *spi) +static void ad5791_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5791_state *st = iio_priv(indio_dev); @@ -439,8 +439,6 @@ static int ad5791_remove(struct spi_device *spi) if (!IS_ERR(st->reg_vss)) regulator_disable(st->reg_vss); - - return 0; } static const struct spi_device_id ad5791_id[] = { diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c index 5ecfdad54dec..6be35c92d435 100644 --- a/drivers/iio/dac/ad8801.c +++ b/drivers/iio/dac/ad8801.c @@ -193,7 +193,7 @@ error_disable_vrefh_reg: return ret; } -static int 
ad8801_remove(struct spi_device *spi) +static void ad8801_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad8801_state *state = iio_priv(indio_dev); @@ -202,8 +202,6 @@ static int ad8801_remove(struct spi_device *spi) if (state->vrefl_reg) regulator_disable(state->vrefl_reg); regulator_disable(state->vrefh_reg); - - return 0; } static const struct spi_device_id ad8801_ids[] = { diff --git a/drivers/iio/dac/ltc1660.c b/drivers/iio/dac/ltc1660.c index f6ec9bf5815e..c76233c9bb72 100644 --- a/drivers/iio/dac/ltc1660.c +++ b/drivers/iio/dac/ltc1660.c @@ -206,15 +206,13 @@ error_disable_reg: return ret; } -static int ltc1660_remove(struct spi_device *spi) +static void ltc1660_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ltc1660_priv *priv = iio_priv(indio_dev); iio_device_unregister(indio_dev); regulator_disable(priv->vref_reg); - - return 0; } static const struct of_device_id ltc1660_dt_ids[] = { diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c index 53e4b887d372..aed46c80757e 100644 --- a/drivers/iio/dac/ltc2632.c +++ b/drivers/iio/dac/ltc2632.c @@ -372,7 +372,7 @@ static int ltc2632_probe(struct spi_device *spi) return iio_device_register(indio_dev); } -static int ltc2632_remove(struct spi_device *spi) +static void ltc2632_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ltc2632_state *st = iio_priv(indio_dev); @@ -381,8 +381,6 @@ static int ltc2632_remove(struct spi_device *spi) if (st->vref_reg) regulator_disable(st->vref_reg); - - return 0; } static const struct spi_device_id ltc2632_id[] = { diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c index 0ae414ee1716..cb9e60e71b91 100644 --- a/drivers/iio/dac/mcp4922.c +++ b/drivers/iio/dac/mcp4922.c @@ -172,7 +172,7 @@ error_disable_reg: return ret; } -static int mcp4922_remove(struct spi_device *spi) +static void mcp4922_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct mcp4922_state *state; @@ -180,8 +180,6 @@ static int mcp4922_remove(struct spi_device *spi) iio_device_unregister(indio_dev); state = iio_priv(indio_dev); regulator_disable(state->vref_reg); - - return 0; } static const struct spi_device_id mcp4922_id[] = { diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c index 6beda2193683..4e1156e6deb2 100644 --- a/drivers/iio/dac/ti-dac082s085.c +++ b/drivers/iio/dac/ti-dac082s085.c @@ -313,7 +313,7 @@ err: return ret; } -static int ti_dac_remove(struct spi_device *spi) +static void ti_dac_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ti_dac_chip *ti_dac = iio_priv(indio_dev); @@ -321,8 +321,6 @@ static int ti_dac_remove(struct spi_device *spi) iio_device_unregister(indio_dev); mutex_destroy(&ti_dac->lock); regulator_disable(ti_dac->vref); - - return 0; } static const struct of_device_id ti_dac_of_id[] = { diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c index 99f275829ec2..e10d17e60ed3 100644 --- a/drivers/iio/dac/ti-dac7311.c +++ b/drivers/iio/dac/ti-dac7311.c @@ -292,7 +292,7 @@ err: return ret; } -static int ti_dac_remove(struct spi_device *spi) +static void ti_dac_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ti_dac_chip *ti_dac = iio_priv(indio_dev); @@ -300,7 +300,6 @@ static int ti_dac_remove(struct spi_device *spi) iio_device_unregister(indio_dev); mutex_destroy(&ti_dac->lock); 
regulator_disable(ti_dac->vref); - return 0; } static const struct of_device_id ti_dac_of_id[] = { diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c index 3d9eba716b69..f3521330f6fb 100644 --- a/drivers/iio/frequency/adf4350.c +++ b/drivers/iio/frequency/adf4350.c @@ -589,7 +589,7 @@ error_disable_clk: return ret; } -static int adf4350_remove(struct spi_device *spi) +static void adf4350_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct adf4350_state *st = iio_priv(indio_dev); @@ -604,8 +604,6 @@ static int adf4350_remove(struct spi_device *spi) if (!IS_ERR(reg)) regulator_disable(reg); - - return 0; } static const struct of_device_id adf4350_of_match[] = { diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c index 745962e1e423..fc2e453527b9 100644 --- a/drivers/iio/gyro/bmg160_spi.c +++ b/drivers/iio/gyro/bmg160_spi.c @@ -27,11 +27,9 @@ static int bmg160_spi_probe(struct spi_device *spi) return bmg160_core_probe(&spi->dev, regmap, spi->irq, id->name); } -static int bmg160_spi_remove(struct spi_device *spi) +static void bmg160_spi_remove(struct spi_device *spi) { bmg160_core_remove(&spi->dev); - - return 0; } static const struct spi_device_id bmg160_spi_id[] = { diff --git a/drivers/iio/gyro/fxas21002c_spi.c b/drivers/iio/gyro/fxas21002c_spi.c index 77ceebef4e34..c3ac169facf9 100644 --- a/drivers/iio/gyro/fxas21002c_spi.c +++ b/drivers/iio/gyro/fxas21002c_spi.c @@ -34,11 +34,9 @@ static int fxas21002c_spi_probe(struct spi_device *spi) return fxas21002c_core_probe(&spi->dev, regmap, spi->irq, id->name); } -static int fxas21002c_spi_remove(struct spi_device *spi) +static void fxas21002c_spi_remove(struct spi_device *spi) { fxas21002c_core_remove(&spi->dev); - - return 0; } static const struct spi_device_id fxas21002c_spi_id[] = { diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index 273f16dcaff8..856ec901b091 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -570,7 +570,7 @@ err_disable_reg: return ret; } -static int afe4403_remove(struct spi_device *spi) +static void afe4403_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct afe4403_data *afe = iio_priv(indio_dev); @@ -586,8 +586,6 @@ static int afe4403_remove(struct spi_device *spi) ret = regulator_disable(afe->regulator); if (ret) dev_warn(afe->dev, "Unable to disable regulator\n"); - - return 0; } static const struct spi_device_id afe4403_ids[] = { diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c index c6ed3ea8460a..4c570412d65c 100644 --- a/drivers/iio/magnetometer/bmc150_magn_spi.c +++ b/drivers/iio/magnetometer/bmc150_magn_spi.c @@ -29,11 +29,9 @@ static int bmc150_magn_spi_probe(struct spi_device *spi) return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name); } -static int bmc150_magn_spi_remove(struct spi_device *spi) +static void bmc150_magn_spi_remove(struct spi_device *spi) { bmc150_magn_remove(&spi->dev); - - return 0; } static const struct spi_device_id bmc150_magn_spi_id[] = { diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c index 89cf59a62c28..a99dd9b33e95 100644 --- a/drivers/iio/magnetometer/hmc5843_spi.c +++ b/drivers/iio/magnetometer/hmc5843_spi.c @@ -74,11 +74,9 @@ static int hmc5843_spi_probe(struct spi_device *spi) id->driver_data, id->name); } -static int hmc5843_spi_remove(struct spi_device *spi) +static void hmc5843_spi_remove(struct 
spi_device *spi) { hmc5843_common_remove(&spi->dev); - - return 0; } static const struct spi_device_id hmc5843_id[] = { diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c index 007c2bd324cb..42723c996c9f 100644 --- a/drivers/iio/potentiometer/max5487.c +++ b/drivers/iio/potentiometer/max5487.c @@ -112,7 +112,7 @@ static int max5487_spi_probe(struct spi_device *spi) return iio_device_register(indio_dev); } -static int max5487_spi_remove(struct spi_device *spi) +static void max5487_spi_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); int ret; @@ -123,8 +123,6 @@ static int max5487_spi_remove(struct spi_device *spi) ret = max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV); if (ret) dev_warn(&spi->dev, "Failed to save wiper regs to NV regs\n"); - - return 0; } static const struct spi_device_id max5487_id[] = { diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c index 9fa2dcd71760..7ccd960ced5d 100644 --- a/drivers/iio/pressure/ms5611_spi.c +++ b/drivers/iio/pressure/ms5611_spi.c @@ -107,11 +107,9 @@ static int ms5611_spi_probe(struct spi_device *spi) spi_get_device_id(spi)->driver_data); } -static int ms5611_spi_remove(struct spi_device *spi) +static void ms5611_spi_remove(struct spi_device *spi) { ms5611_remove(spi_get_drvdata(spi)); - - return 0; } static const struct of_device_id ms5611_spi_matches[] = { diff --git a/drivers/iio/pressure/zpa2326_spi.c b/drivers/iio/pressure/zpa2326_spi.c index 85201a4bae44..ee8ed77536ca 100644 --- a/drivers/iio/pressure/zpa2326_spi.c +++ b/drivers/iio/pressure/zpa2326_spi.c @@ -57,11 +57,9 @@ static int zpa2326_probe_spi(struct spi_device *spi) spi->irq, ZPA2326_DEVICE_ID, regmap); } -static int zpa2326_remove_spi(struct spi_device *spi) +static void zpa2326_remove_spi(struct spi_device *spi) { zpa2326_remove(&spi->dev); - - return 0; } static const struct spi_device_id zpa2326_spi_ids[] = { diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c index eda1b23002b5..d1f5354d5ea2 100644 --- a/drivers/input/keyboard/applespi.c +++ b/drivers/input/keyboard/applespi.c @@ -1858,7 +1858,7 @@ static void applespi_drain_reads(struct applespi_data *applespi) spin_unlock_irqrestore(&applespi->cmd_msg_lock, flags); } -static int applespi_remove(struct spi_device *spi) +static void applespi_remove(struct spi_device *spi) { struct applespi_data *applespi = spi_get_drvdata(spi); @@ -1871,8 +1871,6 @@ static int applespi_remove(struct spi_device *spi) applespi_drain_reads(applespi); debugfs_remove_recursive(applespi->debugfs_root); - - return 0; } static void applespi_shutdown(struct spi_device *spi) diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c index 6e51c9bc619f..91e44d4c66f7 100644 --- a/drivers/input/misc/adxl34x-spi.c +++ b/drivers/input/misc/adxl34x-spi.c @@ -87,13 +87,11 @@ static int adxl34x_spi_probe(struct spi_device *spi) return 0; } -static int adxl34x_spi_remove(struct spi_device *spi) +static void adxl34x_spi_remove(struct spi_device *spi) { struct adxl34x *ac = spi_get_drvdata(spi); adxl34x_remove(ac); - - return 0; } static int __maybe_unused adxl34x_spi_suspend(struct device *dev) diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index a25a77dd9a32..bed68a68f330 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1411,13 +1411,11 @@ static int ads7846_probe(struct spi_device *spi) return 0; } -static int 
ads7846_remove(struct spi_device *spi) +static void ads7846_remove(struct spi_device *spi) { struct ads7846 *ts = spi_get_drvdata(spi); ads7846_stop(ts); - - return 0; } static struct spi_driver ads7846_driver = { diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c index 2aec41eb76b7..5d7db84f2749 100644 --- a/drivers/input/touchscreen/cyttsp4_spi.c +++ b/drivers/input/touchscreen/cyttsp4_spi.c @@ -164,12 +164,10 @@ static int cyttsp4_spi_probe(struct spi_device *spi) return PTR_ERR_OR_ZERO(ts); } -static int cyttsp4_spi_remove(struct spi_device *spi) +static void cyttsp4_spi_remove(struct spi_device *spi) { struct cyttsp4 *ts = spi_get_drvdata(spi); cyttsp4_remove(ts); - - return 0; } static struct spi_driver cyttsp4_spi_driver = { diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c index a2f55920b9b2..555dfe98b3c4 100644 --- a/drivers/input/touchscreen/tsc2005.c +++ b/drivers/input/touchscreen/tsc2005.c @@ -64,11 +64,9 @@ static int tsc2005_probe(struct spi_device *spi) tsc2005_cmd); } -static int tsc2005_remove(struct spi_device *spi) +static void tsc2005_remove(struct spi_device *spi) { tsc200x_remove(&spi->dev); - - return 0; } #ifdef CONFIG_OF diff --git a/drivers/leds/leds-cr0014114.c b/drivers/leds/leds-cr0014114.c index d03cfd3c0bfb..c87686bd7c18 100644 --- a/drivers/leds/leds-cr0014114.c +++ b/drivers/leds/leds-cr0014114.c @@ -266,14 +266,12 @@ static int cr0014114_probe(struct spi_device *spi) return 0; } -static int cr0014114_remove(struct spi_device *spi) +static void cr0014114_remove(struct spi_device *spi) { struct cr0014114 *priv = spi_get_drvdata(spi); cancel_delayed_work_sync(&priv->work); mutex_destroy(&priv->lock); - - return 0; } static const struct of_device_id cr0014114_dt_ids[] = { diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c index 20dc9b9d7dea..cf5fb1195f87 100644 --- a/drivers/leds/leds-dac124s085.c +++ b/drivers/leds/leds-dac124s085.c @@ -85,15 +85,13 @@ eledcr: return ret; } -static int dac124s085_remove(struct spi_device *spi) +static void dac124s085_remove(struct spi_device *spi) { struct dac124s085 *dac = spi_get_drvdata(spi); int i; for (i = 0; i < ARRAY_SIZE(dac->leds); i++) led_classdev_unregister(&dac->leds[i].ldev); - - return 0; } static struct spi_driver dac124s085_driver = { diff --git a/drivers/leds/leds-el15203000.c b/drivers/leds/leds-el15203000.c index f9eb59a25570..7e7b617bcd56 100644 --- a/drivers/leds/leds-el15203000.c +++ b/drivers/leds/leds-el15203000.c @@ -315,13 +315,11 @@ static int el15203000_probe(struct spi_device *spi) return el15203000_probe_dt(priv); } -static int el15203000_remove(struct spi_device *spi) +static void el15203000_remove(struct spi_device *spi) { struct el15203000 *priv = spi_get_drvdata(spi); mutex_destroy(&priv->lock); - - return 0; } static const struct of_device_id el15203000_dt_ids[] = { diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c index f1964c96fb15..2bc5c99daf51 100644 --- a/drivers/leds/leds-spi-byte.c +++ b/drivers/leds/leds-spi-byte.c @@ -130,13 +130,11 @@ static int spi_byte_probe(struct spi_device *spi) return 0; } -static int spi_byte_remove(struct spi_device *spi) +static void spi_byte_remove(struct spi_device *spi) { struct spi_byte_led *led = spi_get_drvdata(spi); mutex_destroy(&led->mutex); - - return 0; } static struct spi_driver spi_byte_driver = { diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c index 6f2a66bc87fb..6be4e5528879 100644 
--- a/drivers/media/spi/cxd2880-spi.c +++ b/drivers/media/spi/cxd2880-spi.c @@ -625,7 +625,7 @@ fail_regulator: return ret; } -static int +static void cxd2880_spi_remove(struct spi_device *spi) { struct cxd2880_dvb_spi *dvb_spi = spi_get_drvdata(spi); @@ -643,8 +643,6 @@ cxd2880_spi_remove(struct spi_device *spi) kfree(dvb_spi); pr_info("cxd2880_spi remove ok.\n"); - - return 0; } static const struct spi_device_id cxd2880_spi_id[] = { diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c index f86ef1ca1288..75c21a93e6d0 100644 --- a/drivers/media/spi/gs1662.c +++ b/drivers/media/spi/gs1662.c @@ -458,13 +458,11 @@ static int gs_probe(struct spi_device *spi) return ret; } -static int gs_remove(struct spi_device *spi) +static void gs_remove(struct spi_device *spi) { struct v4l2_subdev *sd = spi_get_drvdata(spi); v4l2_device_unregister_subdev(sd); - - return 0; } static struct spi_driver gs_driver = { diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c index 44247049a319..ad6c72c1ed04 100644 --- a/drivers/media/tuners/msi001.c +++ b/drivers/media/tuners/msi001.c @@ -472,7 +472,7 @@ err: return ret; } -static int msi001_remove(struct spi_device *spi) +static void msi001_remove(struct spi_device *spi) { struct v4l2_subdev *sd = spi_get_drvdata(spi); struct msi001_dev *dev = sd_to_msi001_dev(sd); @@ -486,7 +486,6 @@ static int msi001_remove(struct spi_device *spi) v4l2_device_unregister_subdev(&dev->sd); v4l2_ctrl_handler_free(&dev->hdl); kfree(dev); - return 0; } static const struct spi_device_id msi001_id_table[] = { diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c index 9fe06dda3782..03620c8efe34 100644 --- a/drivers/mfd/arizona-spi.c +++ b/drivers/mfd/arizona-spi.c @@ -206,13 +206,11 @@ static int arizona_spi_probe(struct spi_device *spi) return arizona_dev_init(arizona); } -static int arizona_spi_remove(struct spi_device *spi) +static void arizona_spi_remove(struct spi_device *spi) { struct arizona *arizona = spi_get_drvdata(spi); arizona_dev_exit(arizona); - - return 0; } static const struct spi_device_id arizona_spi_ids[] = { diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c index 5faf3766a5e2..b79a57b45c1e 100644 --- a/drivers/mfd/da9052-spi.c +++ b/drivers/mfd/da9052-spi.c @@ -55,12 +55,11 @@ static int da9052_spi_probe(struct spi_device *spi) return da9052_device_init(da9052, id->driver_data); } -static int da9052_spi_remove(struct spi_device *spi) +static void da9052_spi_remove(struct spi_device *spi) { struct da9052 *da9052 = spi_get_drvdata(spi); da9052_device_exit(da9052); - return 0; } static const struct spi_device_id da9052_spi_id[] = { diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 70fa18b04ad2..2280f756f422 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -392,7 +392,7 @@ static int pcap_add_subdev(struct pcap_chip *pcap, return ret; } -static int ezx_pcap_remove(struct spi_device *spi) +static void ezx_pcap_remove(struct spi_device *spi) { struct pcap_chip *pcap = spi_get_drvdata(spi); unsigned long flags; @@ -412,8 +412,6 @@ static int ezx_pcap_remove(struct spi_device *spi) irq_set_chip_and_handler(i, NULL, NULL); destroy_workqueue(pcap->workqueue); - - return 0; } static int ezx_pcap_probe(struct spi_device *spi) diff --git a/drivers/mfd/madera-spi.c b/drivers/mfd/madera-spi.c index e860f5ff0933..da84eb50e53a 100644 --- a/drivers/mfd/madera-spi.c +++ b/drivers/mfd/madera-spi.c @@ -112,13 +112,11 @@ static int madera_spi_probe(struct spi_device *spi) return 
madera_dev_init(madera); } -static int madera_spi_remove(struct spi_device *spi) +static void madera_spi_remove(struct spi_device *spi) { struct madera *madera = spi_get_drvdata(spi); madera_dev_exit(madera); - - return 0; } static const struct spi_device_id madera_spi_ids[] = { diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c index 4d8913d647e6..f803527e5819 100644 --- a/drivers/mfd/mc13xxx-spi.c +++ b/drivers/mfd/mc13xxx-spi.c @@ -166,10 +166,9 @@ static int mc13xxx_spi_probe(struct spi_device *spi) return mc13xxx_common_init(&spi->dev); } -static int mc13xxx_spi_remove(struct spi_device *spi) +static void mc13xxx_spi_remove(struct spi_device *spi) { mc13xxx_common_exit(&spi->dev); - return 0; } static struct spi_driver mc13xxx_spi_driver = { diff --git a/drivers/mfd/rsmu_spi.c b/drivers/mfd/rsmu_spi.c index fec2b4ec477c..d2f3d8f1e05a 100644 --- a/drivers/mfd/rsmu_spi.c +++ b/drivers/mfd/rsmu_spi.c @@ -220,13 +220,11 @@ static int rsmu_spi_probe(struct spi_device *client) return rsmu_core_init(rsmu); } -static int rsmu_spi_remove(struct spi_device *client) +static void rsmu_spi_remove(struct spi_device *client) { struct rsmu_ddata *rsmu = spi_get_drvdata(client); rsmu_core_exit(rsmu); - - return 0; } static const struct spi_device_id rsmu_spi_id[] = { diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c index 6c5915016be5..ad8055a0e286 100644 --- a/drivers/mfd/stmpe-spi.c +++ b/drivers/mfd/stmpe-spi.c @@ -102,13 +102,11 @@ stmpe_spi_probe(struct spi_device *spi) return stmpe_probe(&spi_ci, id->driver_data); } -static int stmpe_spi_remove(struct spi_device *spi) +static void stmpe_spi_remove(struct spi_device *spi) { struct stmpe *stmpe = spi_get_drvdata(spi); stmpe_remove(stmpe); - - return 0; } static const struct of_device_id stmpe_spi_of_match[] = { diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c index d701926aa46e..bba38fbc781d 100644 --- a/drivers/mfd/tps65912-spi.c +++ b/drivers/mfd/tps65912-spi.c @@ -50,13 +50,11 @@ static int tps65912_spi_probe(struct spi_device *spi) return tps65912_device_init(tps); } -static int tps65912_spi_remove(struct spi_device *spi) +static void tps65912_spi_remove(struct spi_device *spi) { struct tps65912 *tps = spi_get_drvdata(spi); tps65912_device_exit(tps); - - return 0; } static const struct spi_device_id tps65912_spi_id_table[] = { diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c index a9e75d80ad36..263055bda48b 100644 --- a/drivers/misc/ad525x_dpot-spi.c +++ b/drivers/misc/ad525x_dpot-spi.c @@ -90,10 +90,9 @@ static int ad_dpot_spi_probe(struct spi_device *spi) spi_get_device_id(spi)->name); } -static int ad_dpot_spi_remove(struct spi_device *spi) +static void ad_dpot_spi_remove(struct spi_device *spi) { ad_dpot_remove(&spi->dev); - return 0; } static const struct spi_device_id ad_dpot_spi_id[] = { diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 1f15399e5cb4..b630625b3024 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -555,14 +555,12 @@ static int eeprom_93xx46_probe(struct spi_device *spi) return 0; } -static int eeprom_93xx46_remove(struct spi_device *spi) +static void eeprom_93xx46_remove(struct spi_device *spi) { struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi); if (!(edev->pdata->flags & EE_READONLY)) device_remove_file(&spi->dev, &dev_attr_erase); - - return 0; } static struct spi_driver eeprom_93xx46_driver = { diff --git a/drivers/misc/lattice-ecp3-config.c 
b/drivers/misc/lattice-ecp3-config.c index 98828030b5a4..bac4df2e5231 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -211,13 +211,11 @@ static int lattice_ecp3_probe(struct spi_device *spi) return 0; } -static int lattice_ecp3_remove(struct spi_device *spi) +static void lattice_ecp3_remove(struct spi_device *spi) { struct fpga_data *data = spi_get_drvdata(spi); wait_for_completion(&data->fw_loaded); - - return 0; } static const struct spi_device_id lattice_ecp3_id[] = { diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c index 9e40dfb60742..203a108b8883 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c @@ -96,15 +96,13 @@ static int lis302dl_spi_probe(struct spi_device *spi) return lis3lv02d_init_device(&lis3_dev); } -static int lis302dl_spi_remove(struct spi_device *spi) +static void lis302dl_spi_remove(struct spi_device *spi) { struct lis3lv02d *lis3 = spi_get_drvdata(spi); lis3lv02d_joystick_disable(lis3); lis3lv02d_poweroff(lis3); lis3lv02d_remove_fs(&lis3_dev); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index a576181e9db0..106dd204b1a7 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1489,7 +1489,7 @@ nomem: } -static int mmc_spi_remove(struct spi_device *spi) +static void mmc_spi_remove(struct spi_device *spi) { struct mmc_host *mmc = dev_get_drvdata(&spi->dev); struct mmc_spi_host *host = mmc_priv(mmc); @@ -1507,7 +1507,6 @@ static int mmc_spi_remove(struct spi_device *spi) spi->max_speed_hz = mmc->f_max; mmc_spi_put_pdata(spi); mmc_free_host(mmc); - return 0; } static const struct spi_device_id mmc_spi_dev_ids[] = { diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c index a8b31bddf14b..008df9d8898d 100644 --- a/drivers/mtd/devices/mchp23k256.c +++ b/drivers/mtd/devices/mchp23k256.c @@ -209,13 +209,11 @@ static int mchp23k256_probe(struct spi_device *spi) return 0; } -static int mchp23k256_remove(struct spi_device *spi) +static void mchp23k256_remove(struct spi_device *spi) { struct mchp23k256_flash *flash = spi_get_drvdata(spi); WARN_ON(mtd_device_unregister(&flash->mtd)); - - return 0; } static const struct of_device_id mchp23k256_of_table[] = { diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c index 231a10790196..a3fd426df74b 100644 --- a/drivers/mtd/devices/mchp48l640.c +++ b/drivers/mtd/devices/mchp48l640.c @@ -341,13 +341,11 @@ static int mchp48l640_probe(struct spi_device *spi) return 0; } -static int mchp48l640_remove(struct spi_device *spi) +static void mchp48l640_remove(struct spi_device *spi) { struct mchp48l640_flash *flash = spi_get_drvdata(spi); WARN_ON(mtd_device_unregister(&flash->mtd)); - - return 0; } static const struct of_device_id mchp48l640_of_table[] = { diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 734878abaa23..134e27328597 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -916,7 +916,7 @@ static int dataflash_probe(struct spi_device *spi) return status; } -static int dataflash_remove(struct spi_device *spi) +static void dataflash_remove(struct spi_device *spi) { struct dataflash *flash = spi_get_drvdata(spi); @@ -925,8 +925,6 @@ static int dataflash_remove(struct spi_device *spi) WARN_ON(mtd_device_unregister(&flash->mtd)); kfree(flash); - - return 0; } static struct spi_driver 
dataflash_driver = { diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c index 7f124c1bfa40..8813994ce9f4 100644 --- a/drivers/mtd/devices/sst25l.c +++ b/drivers/mtd/devices/sst25l.c @@ -398,13 +398,11 @@ static int sst25l_probe(struct spi_device *spi) return 0; } -static int sst25l_remove(struct spi_device *spi) +static void sst25l_remove(struct spi_device *spi) { struct sst25l_flash *flash = spi_get_drvdata(spi); WARN_ON(mtd_device_unregister(&flash->mtd)); - - return 0; } static struct spi_driver sst25l_driver = { diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index 04687b15b250..41645a24384c 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -388,7 +388,7 @@ out_power: return ret; } -static int tcan4x5x_can_remove(struct spi_device *spi) +static void tcan4x5x_can_remove(struct spi_device *spi) { struct tcan4x5x_priv *priv = spi_get_drvdata(spi); @@ -397,8 +397,6 @@ static int tcan4x5x_can_remove(struct spi_device *spi) tcan4x5x_power_enable(priv->power, 0); m_can_class_free_dev(priv->cdev.net); - - return 0; } static const struct of_device_id tcan4x5x_of_match[] = { diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index cfcc14fe3e42..664b8f14d7b0 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -948,7 +948,7 @@ static int hi3110_can_probe(struct spi_device *spi) return dev_err_probe(dev, ret, "Probe failed\n"); } -static int hi3110_can_remove(struct spi_device *spi) +static void hi3110_can_remove(struct spi_device *spi) { struct hi3110_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; @@ -960,8 +960,6 @@ static int hi3110_can_remove(struct spi_device *spi) clk_disable_unprepare(priv->clk); free_candev(net); - - return 0; } static int __maybe_unused hi3110_can_suspend(struct device *dev) diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 025e07cb7439..d23edaf22420 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1427,7 +1427,7 @@ out_free: return ret; } -static int mcp251x_can_remove(struct spi_device *spi) +static void mcp251x_can_remove(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; @@ -1442,8 +1442,6 @@ static int mcp251x_can_remove(struct spi_device *spi) clk_disable_unprepare(priv->clk); free_candev(net); - - return 0; } static int __maybe_unused mcp251x_can_suspend(struct device *dev) diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index b5986df6eca0..65c9b31666a6 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -1966,7 +1966,7 @@ static int mcp251xfd_probe(struct spi_device *spi) return err; } -static int mcp251xfd_remove(struct spi_device *spi) +static void mcp251xfd_remove(struct spi_device *spi) { struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct net_device *ndev = priv->ndev; @@ -1975,8 +1975,6 @@ static int mcp251xfd_remove(struct spi_device *spi) mcp251xfd_unregister(priv); spi->max_speed_hz = priv->spi_max_speed_hz_orig; free_candev(ndev); - - return 0; } static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device) diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index 2b88f03e5252..0e54b2a0c211 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++ b/drivers/net/dsa/b53/b53_spi.c @@ -314,7 
+314,7 @@ static int b53_spi_probe(struct spi_device *spi) return 0; } -static int b53_spi_remove(struct spi_device *spi) +static void b53_spi_remove(struct spi_device *spi) { struct b53_device *dev = spi_get_drvdata(spi); @@ -322,8 +322,6 @@ static int b53_spi_remove(struct spi_device *spi) b53_switch_remove(dev); spi_set_drvdata(spi, NULL); - - return 0; } static void b53_spi_shutdown(struct spi_device *spi) diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index 866767b70d65..673589dc88ab 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -87,7 +87,7 @@ static int ksz8795_spi_probe(struct spi_device *spi) return 0; } -static int ksz8795_spi_remove(struct spi_device *spi) +static void ksz8795_spi_remove(struct spi_device *spi) { struct ksz_device *dev = spi_get_drvdata(spi); @@ -95,8 +95,6 @@ static int ksz8795_spi_remove(struct spi_device *spi) ksz_switch_remove(dev); spi_set_drvdata(spi, NULL); - - return 0; } static void ksz8795_spi_shutdown(struct spi_device *spi) diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index e3cb0e6c9f6f..940bb9665f15 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -65,7 +65,7 @@ static int ksz9477_spi_probe(struct spi_device *spi) return 0; } -static int ksz9477_spi_remove(struct spi_device *spi) +static void ksz9477_spi_remove(struct spi_device *spi) { struct ksz_device *dev = spi_get_drvdata(spi); @@ -73,8 +73,6 @@ static int ksz9477_spi_remove(struct spi_device *spi) ksz_switch_remove(dev); spi_set_drvdata(spi, NULL); - - return 0; } static void ksz9477_spi_shutdown(struct spi_device *spi) diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index b513713be610..c2a47c6693b8 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3346,18 +3346,16 @@ static int sja1105_probe(struct spi_device *spi) return dsa_register_switch(priv->ds); } -static int sja1105_remove(struct spi_device *spi) +static void sja1105_remove(struct spi_device *spi) { struct sja1105_private *priv = spi_get_drvdata(spi); if (!priv) - return 0; + return; dsa_unregister_switch(priv->ds); spi_set_drvdata(spi, NULL); - - return 0; } static void sja1105_shutdown(struct spi_device *spi) diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c index 645398901e05..3110895358d8 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-spi.c +++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c @@ -159,18 +159,16 @@ static int vsc73xx_spi_probe(struct spi_device *spi) return vsc73xx_probe(&vsc_spi->vsc); } -static int vsc73xx_spi_remove(struct spi_device *spi) +static void vsc73xx_spi_remove(struct spi_device *spi) { struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi); if (!vsc_spi) - return 0; + return; vsc73xx_remove(&vsc_spi->vsc); spi_set_drvdata(spi, NULL); - - return 0; } static void vsc73xx_spi_shutdown(struct spi_device *spi) diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c index e7a9f9863258..bf70481bb1ca 100644 --- a/drivers/net/ethernet/asix/ax88796c_main.c +++ b/drivers/net/ethernet/asix/ax88796c_main.c @@ -1102,7 +1102,7 @@ err: return ret; } -static int ax88796c_remove(struct spi_device *spi) +static void ax88796c_remove(struct spi_device *spi) { struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev); struct net_device *ndev = 
ax_local->ndev; @@ -1112,8 +1112,6 @@ static int ax88796c_remove(struct spi_device *spi) netif_info(ax_local, probe, ndev, "removing network device %s %s\n", dev_driver_string(&spi->dev), dev_name(&spi->dev)); - - return 0; } #ifdef CONFIG_OF diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 0303e727e99f..d167d93e4c12 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -452,11 +452,9 @@ static int ks8851_probe_spi(struct spi_device *spi) return ks8851_probe_common(netdev, dev, msg_enable); } -static int ks8851_remove_spi(struct spi_device *spi) +static void ks8851_remove_spi(struct spi_device *spi) { ks8851_remove_common(&spi->dev); - - return 0; } static const struct of_device_id ks8851_match_table[] = { diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index 634ac7649c43..db5a3edb4c3c 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -1612,15 +1612,13 @@ error_alloc: return ret; } -static int enc28j60_remove(struct spi_device *spi) +static void enc28j60_remove(struct spi_device *spi) { struct enc28j60_net *priv = spi_get_drvdata(spi); unregister_netdev(priv->netdev); free_irq(spi->irq, priv); free_netdev(priv->netdev); - - return 0; } static const struct of_device_id enc28j60_dt_ids[] = { diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index b90efc80fb59..dc1840cb5b10 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -1093,7 +1093,7 @@ error_out: return ret; } -static int encx24j600_spi_remove(struct spi_device *spi) +static void encx24j600_spi_remove(struct spi_device *spi) { struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev); @@ -1101,8 +1101,6 @@ static int encx24j600_spi_remove(struct spi_device *spi) kthread_stop(priv->kworker_task); free_netdev(priv->ndev); - - return 0; } static const struct spi_device_id encx24j600_spi_id_table[] = { diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 955cce644392..3c5494afd3c0 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -1001,7 +1001,7 @@ qca_spi_probe(struct spi_device *spi) return 0; } -static int +static void qca_spi_remove(struct spi_device *spi) { struct net_device *qcaspi_devs = spi_get_drvdata(spi); @@ -1011,8 +1011,6 @@ qca_spi_remove(struct spi_device *spi) unregister_netdev(qcaspi_devs); free_netdev(qcaspi_devs); - - return 0; } static const struct spi_device_id qca_spi_id[] = { diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index 89a31783fbb4..25739b182ac7 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -731,7 +731,7 @@ static int mse102x_probe_spi(struct spi_device *spi) return 0; } -static int mse102x_remove_spi(struct spi_device *spi) +static void mse102x_remove_spi(struct spi_device *spi) { struct mse102x_net *mse = dev_get_drvdata(&spi->dev); struct mse102x_net_spi *mses = to_mse102x_spi(mse); @@ -741,8 +741,6 @@ static int mse102x_remove_spi(struct spi_device *spi) mse102x_remove_device_debugfs(mses); unregister_netdev(mse->ndev); - - return 0; } static const struct of_device_id mse102x_match_table[] = { diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c 
b/drivers/net/ethernet/wiznet/w5100-spi.c index 7779a36da3c8..7c52796273a4 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -461,11 +461,9 @@ static int w5100_spi_probe(struct spi_device *spi) return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL); } -static int w5100_spi_remove(struct spi_device *spi) +static void w5100_spi_remove(struct spi_device *spi) { w5100_remove(&spi->dev); - - return 0; } static const struct spi_device_id w5100_spi_ids[] = { diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 7db9cbd0f5de..6afdf1622944 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1304,7 +1304,7 @@ err_alloc_wq: return ret; } -static int adf7242_remove(struct spi_device *spi) +static void adf7242_remove(struct spi_device *spi) { struct adf7242_local *lp = spi_get_drvdata(spi); @@ -1316,8 +1316,6 @@ static int adf7242_remove(struct spi_device *spi) ieee802154_unregister_hw(lp->hw); mutex_destroy(&lp->bmux); ieee802154_free_hw(lp->hw); - - return 0; } static const struct of_device_id adf7242_of_match[] = { diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 7d67f41387f5..a4734323dc29 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1759,7 +1759,7 @@ free_dev: return rc; } -static int at86rf230_remove(struct spi_device *spi) +static void at86rf230_remove(struct spi_device *spi) { struct at86rf230_local *lp = spi_get_drvdata(spi); @@ -1769,8 +1769,6 @@ static int at86rf230_remove(struct spi_device *spi) ieee802154_free_hw(lp->hw); at86rf230_debugfs_remove(); dev_dbg(&spi->dev, "unregistered at86rf230\n"); - - return 0; } static const struct of_device_id at86rf230_of_match[] = { diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index ece6ff6049f6..b499bbe4d48f 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -831,7 +831,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl) finish:; } -static int ca8210_remove(struct spi_device *spi_device); +static void ca8210_remove(struct spi_device *spi_device); /** * ca8210_spi_transfer_complete() - Called when a single spi transfer has @@ -3048,7 +3048,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv) * * Return: 0 or linux error code */ -static int ca8210_remove(struct spi_device *spi_device) +static void ca8210_remove(struct spi_device *spi_device) { struct ca8210_priv *priv; struct ca8210_platform_data *pdata; @@ -3088,8 +3088,6 @@ static int ca8210_remove(struct spi_device *spi_device) if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) ca8210_test_interface_clear(priv); } - - return 0; } /** diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index 89c046b204e0..1e1f40f628a0 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -1213,7 +1213,7 @@ err_hw_init: return ret; } -static int cc2520_remove(struct spi_device *spi) +static void cc2520_remove(struct spi_device *spi) { struct cc2520_private *priv = spi_get_drvdata(spi); @@ -1222,8 +1222,6 @@ static int cc2520_remove(struct spi_device *spi) ieee802154_unregister_hw(priv->hw); ieee802154_free_hw(priv->hw); - - return 0; } static const struct spi_device_id cc2520_ids[] = { diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 8dc04e2590b1..a3af52a8e6dd 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ 
b/drivers/net/ieee802154/mcr20a.c @@ -1335,7 +1335,7 @@ free_dev: return ret; } -static int mcr20a_remove(struct spi_device *spi) +static void mcr20a_remove(struct spi_device *spi) { struct mcr20a_local *lp = spi_get_drvdata(spi); @@ -1343,8 +1343,6 @@ static int mcr20a_remove(struct spi_device *spi) ieee802154_unregister_hw(lp->hw); ieee802154_free_hw(lp->hw); - - return 0; } static const struct of_device_id mcr20a_of_match[] = { diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index ff83e00b77af..ee4cfbf2c5cc 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -1356,7 +1356,7 @@ err_ret: return ret; } -static int mrf24j40_remove(struct spi_device *spi) +static void mrf24j40_remove(struct spi_device *spi) { struct mrf24j40 *devrec = spi_get_drvdata(spi); @@ -1366,8 +1366,6 @@ static int mrf24j40_remove(struct spi_device *spi) ieee802154_free_hw(devrec->hw); /* TODO: Will ieee802154_free_device() wait until ->xmit() is * complete? */ - - return 0; } static const struct of_device_id mrf24j40_of_match[] = { diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 8b5445a724ce..ff37f8ba6758 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -517,7 +517,7 @@ static int ks8995_probe(struct spi_device *spi) return 0; } -static int ks8995_remove(struct spi_device *spi) +static void ks8995_remove(struct spi_device *spi) { struct ks8995_switch *ks = spi_get_drvdata(spi); @@ -526,8 +526,6 @@ static int ks8995_remove(struct spi_device *spi) /* assert reset */ if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1); - - return 0; } /* ------------------------------------------------------------------------ */ diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c index 8e3b1c717c10..6063552cea9b 100644 --- a/drivers/net/wan/slic_ds26522.c +++ b/drivers/net/wan/slic_ds26522.c @@ -194,10 +194,9 @@ static int slic_ds26522_init_configure(struct spi_device *spi) return 0; } -static int slic_ds26522_remove(struct spi_device *spi) +static void slic_ds26522_remove(struct spi_device *spi) { pr_info("DS26522 module uninstalled\n"); - return 0; } static int slic_ds26522_probe(struct spi_device *spi) diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c index ab0fe8565851..f99b7ba69fc3 100644 --- a/drivers/net/wireless/intersil/p54/p54spi.c +++ b/drivers/net/wireless/intersil/p54/p54spi.c @@ -669,7 +669,7 @@ err_free: return ret; } -static int p54spi_remove(struct spi_device *spi) +static void p54spi_remove(struct spi_device *spi) { struct p54s_priv *priv = spi_get_drvdata(spi); @@ -684,8 +684,6 @@ static int p54spi_remove(struct spi_device *spi) mutex_destroy(&priv->mutex); p54_free_common(priv->hw); - - return 0; } diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index cd9f8ecf171f..ff1c7ec8c450 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -1195,7 +1195,7 @@ out: return err; } -static int libertas_spi_remove(struct spi_device *spi) +static void libertas_spi_remove(struct spi_device *spi) { struct if_spi_card *card = spi_get_drvdata(spi); struct lbs_private *priv = card->priv; @@ -1212,8 +1212,6 @@ static int libertas_spi_remove(struct spi_device *spi) if (card->pdata->teardown) card->pdata->teardown(spi); free_if_spi_card(card); - - return 
0; } static int if_spi_suspend(struct device *dev) diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index 2c2ed4b09efd..d2db52289399 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -240,7 +240,7 @@ free: return ret; } -static int wilc_bus_remove(struct spi_device *spi) +static void wilc_bus_remove(struct spi_device *spi) { struct wilc *wilc = spi_get_drvdata(spi); struct wilc_spi *spi_priv = wilc->bus_data; @@ -248,8 +248,6 @@ static int wilc_bus_remove(struct spi_device *spi) clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); kfree(spi_priv); - - return 0; } static const struct of_device_id wilc_of_match[] = { diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c index 271ed2ce2d7f..fe0d220da44d 100644 --- a/drivers/net/wireless/st/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c @@ -423,7 +423,7 @@ static int cw1200_spi_probe(struct spi_device *func) } /* Disconnect Function to be called by SPI stack when device is disconnected */ -static int cw1200_spi_disconnect(struct spi_device *func) +static void cw1200_spi_disconnect(struct spi_device *func) { struct hwbus_priv *self = spi_get_drvdata(func); @@ -435,8 +435,6 @@ static int cw1200_spi_disconnect(struct spi_device *func) } } cw1200_spi_off(dev_get_platdata(&func->dev)); - - return 0; } static int __maybe_unused cw1200_spi_suspend(struct device *dev) diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c index 5b894bd6237e..9df38726e8b0 100644 --- a/drivers/net/wireless/ti/wl1251/spi.c +++ b/drivers/net/wireless/ti/wl1251/spi.c @@ -327,14 +327,12 @@ out_free: return ret; } -static int wl1251_spi_remove(struct spi_device *spi) +static void wl1251_spi_remove(struct spi_device *spi) { struct wl1251 *wl = spi_get_drvdata(spi); wl1251_free_hw(wl); regulator_disable(wl->vio); - - return 0; } static struct spi_driver wl1251_spi_driver = { diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 354a7e1c3315..7eae1ec2eb2b 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -546,13 +546,11 @@ out_dev_put: return ret; } -static int wl1271_remove(struct spi_device *spi) +static void wl1271_remove(struct spi_device *spi) { struct wl12xx_spi_glue *glue = spi_get_drvdata(spi); platform_device_unregister(glue->core); - - return 0; } static struct spi_driver wl1271_spi_driver = { diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c index 5b833a9a83f8..a38e2fcdfd39 100644 --- a/drivers/nfc/nfcmrvl/spi.c +++ b/drivers/nfc/nfcmrvl/spi.c @@ -174,12 +174,11 @@ static int nfcmrvl_spi_probe(struct spi_device *spi) return 0; } -static int nfcmrvl_spi_remove(struct spi_device *spi) +static void nfcmrvl_spi_remove(struct spi_device *spi) { struct nfcmrvl_spi_drv_data *drv_data = spi_get_drvdata(spi); nfcmrvl_nci_unregister_dev(drv_data->priv); - return 0; } static const struct of_device_id of_nfcmrvl_spi_match[] __maybe_unused = { diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c index 4e723992e74c..169eacc0a32a 100644 --- a/drivers/nfc/st-nci/spi.c +++ b/drivers/nfc/st-nci/spi.c @@ -263,13 +263,11 @@ static int st_nci_spi_probe(struct spi_device *dev) return r; } -static int st_nci_spi_remove(struct spi_device *dev) +static void st_nci_spi_remove(struct spi_device *dev) { struct st_nci_spi_phy *phy = 
spi_get_drvdata(dev); ndlc_remove(phy->ndlc); - - return 0; } static struct spi_device_id st_nci_spi_id_table[] = { diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index b23f47936473..ed704bb77226 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -1198,7 +1198,7 @@ err_disable_regulator: return ret; } -static int st95hf_remove(struct spi_device *nfc_spi_dev) +static void st95hf_remove(struct spi_device *nfc_spi_dev) { int result = 0; unsigned char reset_cmd = ST95HF_COMMAND_RESET; @@ -1236,8 +1236,6 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev) /* disable regulator */ if (stcontext->st95hf_supply) regulator_disable(stcontext->st95hf_supply); - - return 0; } /* Register as SPI protocol driver */ diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c index 29ca9c328df2..21d68664fe08 100644 --- a/drivers/nfc/trf7970a.c +++ b/drivers/nfc/trf7970a.c @@ -2144,7 +2144,7 @@ err_destroy_lock: return ret; } -static int trf7970a_remove(struct spi_device *spi) +static void trf7970a_remove(struct spi_device *spi) { struct trf7970a *trf = spi_get_drvdata(spi); @@ -2160,8 +2160,6 @@ static int trf7970a_remove(struct spi_device *spi) regulator_disable(trf->regulator); mutex_destroy(&trf->lock); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c index 713c58687721..8493af0f680e 100644 --- a/drivers/platform/chrome/cros_ec_spi.c +++ b/drivers/platform/chrome/cros_ec_spi.c @@ -786,13 +786,11 @@ static int cros_ec_spi_probe(struct spi_device *spi) return 0; } -static int cros_ec_spi_remove(struct spi_device *spi) +static void cros_ec_spi_remove(struct spi_device *spi) { struct cros_ec_device *ec_dev = spi_get_drvdata(spi); cros_ec_unregister(ec_dev); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c index 0d46706afd2d..4823bd2819f6 100644 --- a/drivers/platform/olpc/olpc-xo175-ec.c +++ b/drivers/platform/olpc/olpc-xo175-ec.c @@ -648,7 +648,7 @@ static struct olpc_ec_driver olpc_xo175_ec_driver = { .ec_cmd = olpc_xo175_ec_cmd, }; -static int olpc_xo175_ec_remove(struct spi_device *spi) +static void olpc_xo175_ec_remove(struct spi_device *spi) { if (pm_power_off == olpc_xo175_ec_power_off) pm_power_off = NULL; @@ -657,8 +657,6 @@ static int olpc_xo175_ec_remove(struct spi_device *spi) platform_device_unregister(olpc_ec); olpc_ec = NULL; - - return 0; } static int olpc_xo175_ec_probe(struct spi_device *spi) diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index 2f83adef966e..6d66ab5a8b17 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c @@ -185,10 +185,9 @@ static int ds1302_probe(struct spi_device *spi) return 0; } -static int ds1302_remove(struct spi_device *spi) +static void ds1302_remove(struct spi_device *spi) { spi_set_drvdata(spi, NULL); - return 0; } #ifdef CONFIG_OF diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 9ef107b99b65..ed9360486953 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c @@ -720,7 +720,7 @@ static int ds1305_probe(struct spi_device *spi) return 0; } -static int ds1305_remove(struct spi_device *spi) +static void ds1305_remove(struct spi_device *spi) { struct ds1305 *ds1305 = spi_get_drvdata(spi); @@ -730,8 +730,6 @@ static int ds1305_remove(struct spi_device *spi) devm_free_irq(&spi->dev, spi->irq, ds1305); cancel_work_sync(&ds1305->work); } - - return 0; } static struct spi_driver 
ds1305_driver = {
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index f14ed6c96437..ed5a6ba89a3e 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -434,11 +434,9 @@ static int ds1343_probe(struct spi_device *spi)
         return 0;
 }

-static int ds1343_remove(struct spi_device *spi)
+static void ds1343_remove(struct spi_device *spi)
 {
         dev_pm_clear_wake_irq(&spi->dev);
-
-        return 0;
 }

 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 37f4443ce9a0..e9d83d65873b 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -854,15 +854,13 @@ static int spi_mem_probe(struct spi_device *spi)
         return memdrv->probe(mem);
 }

-static int spi_mem_remove(struct spi_device *spi)
+static void spi_mem_remove(struct spi_device *spi)
 {
         struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
         struct spi_mem *mem = spi_get_drvdata(spi);

         if (memdrv->remove)
-                return memdrv->remove(mem);
-
-        return 0;
+                memdrv->remove(mem);
 }

 static void spi_mem_shutdown(struct spi_device *spi)
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
index 169f3d595f60..d37cfe995a63 100644
--- a/drivers/spi/spi-slave-system-control.c
+++ b/drivers/spi/spi-slave-system-control.c
@@ -132,13 +132,12 @@ static int spi_slave_system_control_probe(struct spi_device *spi)
         return 0;
 }

-static int spi_slave_system_control_remove(struct spi_device *spi)
+static void spi_slave_system_control_remove(struct spi_device *spi)
 {
         struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);

         spi_slave_abort(spi);
         wait_for_completion(&priv->finished);
-        return 0;
 }

 static struct spi_driver spi_slave_system_control_driver = {
diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c
index f2e07a392d68..f56c1afb8534 100644
--- a/drivers/spi/spi-slave-time.c
+++ b/drivers/spi/spi-slave-time.c
@@ -106,13 +106,12 @@ static int spi_slave_time_probe(struct spi_device *spi)
         return 0;
 }

-static int spi_slave_time_remove(struct spi_device *spi)
+static void spi_slave_time_remove(struct spi_device *spi)
 {
         struct spi_slave_time_priv *priv = spi_get_drvdata(spi);

         spi_slave_abort(spi);
         wait_for_completion(&priv->finished);
-        return 0;
 }

 static struct spi_driver spi_slave_time_driver = {
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index f8ad0709d015..a565352f6381 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -288,7 +288,7 @@ static int tle62x0_probe(struct spi_device *spi)
         return ret;
 }

-static int tle62x0_remove(struct spi_device *spi)
+static void tle62x0_remove(struct spi_device *spi)
 {
         struct tle62x0_state *st = spi_get_drvdata(spi);
         int ptr;
@@ -298,7 +298,6 @@ static int tle62x0_remove(struct spi_device *spi)
         device_remove_file(&spi->dev, &dev_attr_status_show);
         kfree(st);
-        return 0;
 }

 static struct spi_driver tle62x0_driver = {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4599b121d744..ead9a132dcb9 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -404,15 +404,8 @@ static void spi_remove(struct device *dev)
 {
         const struct spi_driver *sdrv = to_spi_driver(dev->driver);

-        if (sdrv->remove) {
-                int ret;
-
-                ret = sdrv->remove(to_spi_device(dev));
-                if (ret)
-                        dev_warn(dev,
-                                 "Failed to unbind driver (%pe), ignoring\n",
-                                 ERR_PTR(ret));
-        }
+        if (sdrv->remove)
+                sdrv->remove(to_spi_device(dev));

         dev_pm_domain_detach(dev, true);
 }
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index a5cceca8b82b..9468f74308bd 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c @@ -803,7 +803,7 @@ static int spidev_probe(struct spi_device *spi) return status; } -static int spidev_remove(struct spi_device *spi) +static void spidev_remove(struct spi_device *spi) { struct spidev_data *spidev = spi_get_drvdata(spi); @@ -820,8 +820,6 @@ static int spidev_remove(struct spi_device *spi) if (spidev->users == 0) kfree(spidev); mutex_unlock(&device_list_lock); - - return 0; } static struct spi_driver spidev_spi_driver = { diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index 6a7545b5bcd2..b68f5f9b7c78 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -286,12 +286,11 @@ static int fbtft_driver_probe_spi(struct spi_device *spi) \ return fbtft_probe_common(_display, spi, NULL); \ } \ \ -static int fbtft_driver_remove_spi(struct spi_device *spi) \ +static void fbtft_driver_remove_spi(struct spi_device *spi) \ { \ struct fb_info *info = spi_get_drvdata(spi); \ \ fbtft_remove_common(&spi->dev, info); \ - return 0; \ } \ \ static struct spi_driver fbtft_driver_spi_driver = { \ diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c index 68c09fa016ed..1d31c35875e3 100644 --- a/drivers/staging/pi433/pi433_if.c +++ b/drivers/staging/pi433/pi433_if.c @@ -1264,7 +1264,7 @@ RX_failed: return retval; } -static int pi433_remove(struct spi_device *spi) +static void pi433_remove(struct spi_device *spi) { struct pi433_device *device = spi_get_drvdata(spi); @@ -1284,8 +1284,6 @@ static int pi433_remove(struct spi_device *spi) kfree(device->rx_buffer); kfree(device); - - return 0; } static const struct of_device_id pi433_dt_ids[] = { diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c index 55ffcd7c42e2..fa0ff66a457d 100644 --- a/drivers/staging/wfx/bus_spi.c +++ b/drivers/staging/wfx/bus_spi.c @@ -232,12 +232,11 @@ static int wfx_spi_probe(struct spi_device *func) return wfx_probe(bus->core); } -static int wfx_spi_remove(struct spi_device *func) +static void wfx_spi_remove(struct spi_device *func) { struct wfx_spi_priv *bus = spi_get_drvdata(func); wfx_release(bus->core); - return 0; } /* For dynamic driver binding, kernel does not use OF to match driver. 
It only diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index 3c92d4e01488..516cff362434 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -805,7 +805,7 @@ static int max3100_probe(struct spi_device *spi) return 0; } -static int max3100_remove(struct spi_device *spi) +static void max3100_remove(struct spi_device *spi) { struct max3100_port *s = spi_get_drvdata(spi); int i; @@ -828,13 +828,12 @@ static int max3100_remove(struct spi_device *spi) for (i = 0; i < MAX_MAX3100; i++) if (max3100s[i]) { mutex_unlock(&max3100s_lock); - return 0; + return; } pr_debug("removing max3100 driver\n"); uart_unregister_driver(&max3100_uart_driver); mutex_unlock(&max3100s_lock); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index dde0824b2fa5..3112b4a05448 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1487,10 +1487,9 @@ static int max310x_spi_probe(struct spi_device *spi) return max310x_probe(&spi->dev, devtype, regmap, spi->irq); } -static int max310x_spi_remove(struct spi_device *spi) +static void max310x_spi_remove(struct spi_device *spi) { max310x_remove(&spi->dev); - return 0; } static const struct spi_device_id max310x_id_table[] = { diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 64e7e6c8145f..25d67b8c4db7 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1440,11 +1440,9 @@ static int sc16is7xx_spi_probe(struct spi_device *spi) return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq); } -static int sc16is7xx_spi_remove(struct spi_device *spi) +static void sc16is7xx_spi_remove(struct spi_device *spi) { sc16is7xx_remove(&spi->dev); - - return 0; } static const struct spi_device_id sc16is7xx_spi_id_table[] = { diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c index d2a2b20cc1ad..7d9bd16190c0 100644 --- a/drivers/usb/gadget/udc/max3420_udc.c +++ b/drivers/usb/gadget/udc/max3420_udc.c @@ -1292,7 +1292,7 @@ del_gadget: return err; } -static int max3420_remove(struct spi_device *spi) +static void max3420_remove(struct spi_device *spi) { struct max3420_udc *udc = spi_get_drvdata(spi); unsigned long flags; @@ -1304,8 +1304,6 @@ static int max3420_remove(struct spi_device *spi) kthread_stop(udc->thread_task); spin_unlock_irqrestore(&udc->lock, flags); - - return 0; } static const struct of_device_id max3420_udc_of_match[] = { diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index 30de85a707fe..99a5523a79fb 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -1926,7 +1926,7 @@ error: return retval; } -static int +static void max3421_remove(struct spi_device *spi) { struct max3421_hcd *max3421_hcd; @@ -1947,7 +1947,6 @@ max3421_remove(struct spi_device *spi) free_irq(spi->irq, hcd); usb_put_hcd(hcd); - return 0; } static const struct of_device_id max3421_of_match_table[] = { diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c index 8a4361e95a11..522dd81110b8 100644 --- a/drivers/video/backlight/ams369fg06.c +++ b/drivers/video/backlight/ams369fg06.c @@ -506,12 +506,11 @@ static int ams369fg06_probe(struct spi_device *spi) return 0; } -static int ams369fg06_remove(struct spi_device *spi) +static void ams369fg06_remove(struct spi_device *spi) { struct ams369fg06 *lcd = spi_get_drvdata(spi); ams369fg06_power(lcd, FB_BLANK_POWERDOWN); - return 0; } #ifdef 
CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index 33f5d80495e6..0a57033ae31d 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c @@ -542,7 +542,7 @@ static int corgi_lcd_probe(struct spi_device *spi) return 0; } -static int corgi_lcd_remove(struct spi_device *spi) +static void corgi_lcd_remove(struct spi_device *spi) { struct corgi_lcd *lcd = spi_get_drvdata(spi); @@ -550,7 +550,6 @@ static int corgi_lcd_remove(struct spi_device *spi) lcd->bl_dev->props.brightness = 0; backlight_update_status(lcd->bl_dev); corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN); - return 0; } static struct spi_driver corgi_lcd_driver = { diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c index 328aba9cddad..e7b6bd827986 100644 --- a/drivers/video/backlight/ili922x.c +++ b/drivers/video/backlight/ili922x.c @@ -526,10 +526,9 @@ static int ili922x_probe(struct spi_device *spi) return 0; } -static int ili922x_remove(struct spi_device *spi) +static void ili922x_remove(struct spi_device *spi) { ili922x_poweroff(spi); - return 0; } static struct spi_driver ili922x_driver = { diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index 46f97d1c3d21..cc763cf15f53 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c @@ -223,12 +223,11 @@ static int l4f00242t03_probe(struct spi_device *spi) return 0; } -static int l4f00242t03_remove(struct spi_device *spi) +static void l4f00242t03_remove(struct spi_device *spi) { struct l4f00242t03_priv *priv = spi_get_drvdata(spi); l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN); - return 0; } static void l4f00242t03_shutdown(struct spi_device *spi) diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c index f949b66dce1b..5c46df8022bf 100644 --- a/drivers/video/backlight/lms501kf03.c +++ b/drivers/video/backlight/lms501kf03.c @@ -364,12 +364,11 @@ static int lms501kf03_probe(struct spi_device *spi) return 0; } -static int lms501kf03_remove(struct spi_device *spi) +static void lms501kf03_remove(struct spi_device *spi) { struct lms501kf03 *lcd = spi_get_drvdata(spi); lms501kf03_power(lcd, FB_BLANK_POWERDOWN); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c index 5cbf621e48bd..b6d373af6e3f 100644 --- a/drivers/video/backlight/ltv350qv.c +++ b/drivers/video/backlight/ltv350qv.c @@ -255,12 +255,11 @@ static int ltv350qv_probe(struct spi_device *spi) return 0; } -static int ltv350qv_remove(struct spi_device *spi) +static void ltv350qv_remove(struct spi_device *spi) { struct ltv350qv *lcd = spi_get_drvdata(spi); ltv350qv_power(lcd, FB_BLANK_POWERDOWN); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c index 0de044dcafd5..fc6fbaf85594 100644 --- a/drivers/video/backlight/tdo24m.c +++ b/drivers/video/backlight/tdo24m.c @@ -397,12 +397,11 @@ static int tdo24m_probe(struct spi_device *spi) return 0; } -static int tdo24m_remove(struct spi_device *spi) +static void tdo24m_remove(struct spi_device *spi) { struct tdo24m *lcd = spi_get_drvdata(spi); tdo24m_power(lcd, FB_BLANK_POWERDOWN); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index 38765544345b..23d6c6bf0f54 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ 
b/drivers/video/backlight/tosa_lcd.c @@ -232,15 +232,13 @@ err_register: return ret; } -static int tosa_lcd_remove(struct spi_device *spi) +static void tosa_lcd_remove(struct spi_device *spi) { struct tosa_lcd_data *data = spi_get_drvdata(spi); i2c_unregister_device(data->i2c); tosa_lcd_tg_off(data); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c index 3567b45f9ba9..bfc1913e8b55 100644 --- a/drivers/video/backlight/vgg2432a4.c +++ b/drivers/video/backlight/vgg2432a4.c @@ -233,11 +233,9 @@ static int vgg2432a4_probe(struct spi_device *spi) return 0; } -static int vgg2432a4_remove(struct spi_device *spi) +static void vgg2432a4_remove(struct spi_device *spi) { ili9320_remove(spi_get_drvdata(spi)); - - return 0; } static void vgg2432a4_shutdown(struct spi_device *spi) diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c index a75ae0c9b14c..03cff39d392d 100644 --- a/drivers/video/fbdev/omap/lcd_mipid.c +++ b/drivers/video/fbdev/omap/lcd_mipid.c @@ -570,14 +570,12 @@ static int mipid_spi_probe(struct spi_device *spi) return 0; } -static int mipid_spi_remove(struct spi_device *spi) +static void mipid_spi_remove(struct spi_device *spi) { struct mipid_device *md = dev_get_drvdata(&spi->dev); mipid_disable(&md->panel); kfree(md); - - return 0; } static struct spi_driver mipid_spi_driver = { diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c index 1bec7a4422e8..aab67721263d 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c @@ -316,7 +316,7 @@ err_gpio: return r; } -static int lb035q02_panel_spi_remove(struct spi_device *spi) +static void lb035q02_panel_spi_remove(struct spi_device *spi) { struct panel_drv_data *ddata = spi_get_drvdata(spi); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -328,8 +328,6 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi) lb035q02_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } static const struct of_device_id lb035q02_of_match[] = { diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c index dff9ebbadfc0..be9910ff6e62 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c @@ -327,7 +327,7 @@ err_gpio: return r; } -static int nec_8048_remove(struct spi_device *spi) +static void nec_8048_remove(struct spi_device *spi) { struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); struct omap_dss_device *dssdev = &ddata->dssdev; @@ -341,8 +341,6 @@ static int nec_8048_remove(struct spi_device *spi) nec_8048_disconnect(dssdev); omap_dss_put_device(in); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 8d8b5ff7d43c..a909b5385ca5 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -857,7 +857,7 @@ err_gpio: return r; } -static int acx565akm_remove(struct spi_device *spi) +static void acx565akm_remove(struct spi_device *spi) { struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); struct omap_dss_device *dssdev 
= &ddata->dssdev;
@@ -874,8 +874,6 @@ static int acx565akm_remove(struct spi_device *spi)

         acx565akm_disconnect(dssdev);
         omap_dss_put_device(in);
-
-        return 0;
 }

 static const struct of_device_id acx565akm_of_match[] = {
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
index 595ebd8bd5dc..3c0f887d3092 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
@@ -425,7 +425,7 @@ err_reg:
         return r;
 }

-static int td028ttec1_panel_remove(struct spi_device *spi)
+static void td028ttec1_panel_remove(struct spi_device *spi)
 {
         struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
         struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -439,8 +439,6 @@ static int td028ttec1_panel_remove(struct spi_device *spi)

         td028ttec1_panel_disconnect(dssdev);
         omap_dss_put_device(in);
-
-        return 0;
 }

 static const struct of_device_id td028ttec1_of_match[] = {
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index afac1d9445aa..58bbba7c037f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -564,7 +564,7 @@ err_regulator:
         return r;
 }

-static int tpo_td043_remove(struct spi_device *spi)
+static void tpo_td043_remove(struct spi_device *spi)
 {
         struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
         struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -580,8 +580,6 @@ static int tpo_td043_remove(struct spi_device *spi)
         omap_dss_put_device(in);

         sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
-
-        return 0;
 }

 #ifdef CONFIG_PM_SLEEP
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 7ab3fed7b804..c84e61b99c7b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -280,7 +280,7 @@ struct spi_message;
 struct spi_driver {
         const struct spi_device_id *id_table;
         int             (*probe)(struct spi_device *spi);
-        int             (*remove)(struct spi_device *spi);
+        void            (*remove)(struct spi_device *spi);
         void            (*shutdown)(struct spi_device *spi);
         struct device_driver driver;
 };
diff --git a/sound/pci/hda/cs35l41_hda_spi.c b/sound/pci/hda/cs35l41_hda_spi.c
index 9f8123893cc8..50eb6c0e6658 100644
--- a/sound/pci/hda/cs35l41_hda_spi.c
+++ b/sound/pci/hda/cs35l41_hda_spi.c
@@ -28,11 +28,9 @@ static int cs35l41_hda_spi_probe(struct spi_device *spi)
                 devm_regmap_init_spi(spi, &cs35l41_regmap_spi));
 }

-static int cs35l41_hda_spi_remove(struct spi_device *spi)
+static void cs35l41_hda_spi_remove(struct spi_device *spi)
 {
         cs35l41_hda_remove(&spi->dev);
-
-        return 0;
 }

 static const struct spi_device_id cs35l41_hda_spi_id[] = {
diff --git a/sound/soc/codecs/adau1761-spi.c b/sound/soc/codecs/adau1761-spi.c
index 655689c9778a..7c9242c2ff94 100644
--- a/sound/soc/codecs/adau1761-spi.c
+++ b/sound/soc/codecs/adau1761-spi.c
@@ -45,10 +45,9 @@ static int adau1761_spi_probe(struct spi_device *spi)
                 id->driver_data, adau1761_spi_switch_mode);
 }

-static int adau1761_spi_remove(struct spi_device *spi)
+static void adau1761_spi_remove(struct spi_device *spi)
 {
         adau17x1_remove(&spi->dev);
-        return 0;
 }

 static const struct spi_device_id adau1761_spi_id[] = {
diff --git a/sound/soc/codecs/adau1781-spi.c b/sound/soc/codecs/adau1781-spi.c
index bb5613574786..1a09633d5a88 100644
--- a/sound/soc/codecs/adau1781-spi.c
+++ b/sound/soc/codecs/adau1781-spi.c
@@
-45,10 +45,9 @@ static int adau1781_spi_probe(struct spi_device *spi) id->driver_data, adau1781_spi_switch_mode); } -static int adau1781_spi_remove(struct spi_device *spi) +static void adau1781_spi_remove(struct spi_device *spi) { adau17x1_remove(&spi->dev); - return 0; } static const struct spi_device_id adau1781_spi_id[] = { diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c index 6dfd5459aa20..169221a5b09f 100644 --- a/sound/soc/codecs/cs35l41-spi.c +++ b/sound/soc/codecs/cs35l41-spi.c @@ -55,13 +55,11 @@ static int cs35l41_spi_probe(struct spi_device *spi) return cs35l41_probe(cs35l41, pdata); } -static int cs35l41_spi_remove(struct spi_device *spi) +static void cs35l41_spi_remove(struct spi_device *spi) { struct cs35l41_private *cs35l41 = spi_get_drvdata(spi); cs35l41_remove(cs35l41); - - return 0; } #ifdef CONFIG_OF diff --git a/sound/soc/codecs/pcm3168a-spi.c b/sound/soc/codecs/pcm3168a-spi.c index ecd379f308e6..b5b08046f545 100644 --- a/sound/soc/codecs/pcm3168a-spi.c +++ b/sound/soc/codecs/pcm3168a-spi.c @@ -26,11 +26,9 @@ static int pcm3168a_spi_probe(struct spi_device *spi) return pcm3168a_probe(&spi->dev, regmap); } -static int pcm3168a_spi_remove(struct spi_device *spi) +static void pcm3168a_spi_remove(struct spi_device *spi) { pcm3168a_remove(&spi->dev); - - return 0; } static const struct spi_device_id pcm3168a_spi_id[] = { diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c index 7cf559b47e1c..4d29e7196380 100644 --- a/sound/soc/codecs/pcm512x-spi.c +++ b/sound/soc/codecs/pcm512x-spi.c @@ -26,10 +26,9 @@ static int pcm512x_spi_probe(struct spi_device *spi) return pcm512x_probe(&spi->dev, regmap); } -static int pcm512x_spi_remove(struct spi_device *spi) +static void pcm512x_spi_remove(struct spi_device *spi) { pcm512x_remove(&spi->dev); - return 0; } static const struct spi_device_id pcm512x_spi_id[] = { diff --git a/sound/soc/codecs/tlv320aic32x4-spi.c b/sound/soc/codecs/tlv320aic32x4-spi.c index a8958cd1c692..03cce8d6404f 100644 --- a/sound/soc/codecs/tlv320aic32x4-spi.c +++ b/sound/soc/codecs/tlv320aic32x4-spi.c @@ -46,11 +46,9 @@ static int aic32x4_spi_probe(struct spi_device *spi) return aic32x4_probe(&spi->dev, regmap); } -static int aic32x4_spi_remove(struct spi_device *spi) +static void aic32x4_spi_remove(struct spi_device *spi) { aic32x4_remove(&spi->dev); - - return 0; } static const struct spi_device_id aic32x4_spi_id[] = { diff --git a/sound/soc/codecs/tlv320aic3x-spi.c b/sound/soc/codecs/tlv320aic3x-spi.c index 494e84402232..deed6ec7e081 100644 --- a/sound/soc/codecs/tlv320aic3x-spi.c +++ b/sound/soc/codecs/tlv320aic3x-spi.c @@ -35,11 +35,9 @@ static int aic3x_spi_probe(struct spi_device *spi) return aic3x_probe(&spi->dev, regmap, id->driver_data); } -static int aic3x_spi_remove(struct spi_device *spi) +static void aic3x_spi_remove(struct spi_device *spi) { aic3x_remove(&spi->dev); - - return 0; } static const struct spi_device_id aic3x_spi_id[] = { diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c index 28b4656c4e14..1bef1c500c8e 100644 --- a/sound/soc/codecs/wm0010.c +++ b/sound/soc/codecs/wm0010.c @@ -969,7 +969,7 @@ static int wm0010_spi_probe(struct spi_device *spi) return 0; } -static int wm0010_spi_remove(struct spi_device *spi) +static void wm0010_spi_remove(struct spi_device *spi) { struct wm0010_priv *wm0010 = spi_get_drvdata(spi); @@ -980,8 +980,6 @@ static int wm0010_spi_remove(struct spi_device *spi) if (wm0010->irq) free_irq(wm0010->irq, wm0010); - - return 0; } static struct 
spi_driver wm0010_spi_driver = { diff --git a/sound/soc/codecs/wm8804-spi.c b/sound/soc/codecs/wm8804-spi.c index 9a8da1511c34..628568724c20 100644 --- a/sound/soc/codecs/wm8804-spi.c +++ b/sound/soc/codecs/wm8804-spi.c @@ -24,10 +24,9 @@ static int wm8804_spi_probe(struct spi_device *spi) return wm8804_probe(&spi->dev, regmap); } -static int wm8804_spi_remove(struct spi_device *spi) +static void wm8804_spi_remove(struct spi_device *spi) { wm8804_remove(&spi->dev); - return 0; } static const struct of_device_id wm8804_of_match[] = { diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c index 76c0e37a838c..56d2c712e257 100644 --- a/sound/spi/at73c213.c +++ b/sound/spi/at73c213.c @@ -1001,7 +1001,7 @@ out: return retval; } -static int snd_at73c213_remove(struct spi_device *spi) +static void snd_at73c213_remove(struct spi_device *spi) { struct snd_card *card = dev_get_drvdata(&spi->dev); struct snd_at73c213 *chip = card->private_data; @@ -1066,8 +1066,6 @@ out: ssc_free(chip->ssc); snd_card_free(card); - - return 0; } #ifdef CONFIG_PM_SLEEP -- cgit v1.2.3-71-gd317 From 1f8863bfb5ca500ea1c7669b16b1931ba27fce20 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 1 Feb 2022 12:02:59 +0000 Subject: genirq: Allow the PM device to originate from irq domain As a preparation to moving the reference to the device used for runtime power management, add a new 'dev' field to the irqdomain structure for that exact purpose. The irq_chip_pm_{get,put}() helpers are made aware of the dual location via a new private helper. No functional change intended. Signed-off-by: Marc Zyngier Reviewed-by: Geert Uytterhoeven Tested-by: Geert Uytterhoeven Tested-by: Tony Lindgren Acked-by: Bartosz Golaszewski Link: https://lore.kernel.org/r/20220201120310.878267-2-maz@kernel.org --- include/linux/irqdomain.h | 10 ++++++++++ kernel/irq/chip.c | 23 ++++++++++++++++++----- 2 files changed, 28 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d476405802e9..be25a33293e5 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -151,6 +151,8 @@ struct irq_domain_chip_generic; * @gc: Pointer to a list of generic chips. There is a helper function for * setting up one or more generic chips for interrupt controllers * drivers using the generic chip library which uses this pointer. + * @dev: Pointer to a device that the domain represent, and that will be + * used for power management purposes. 
* @parent: Pointer to parent irq_domain to support hierarchy irq_domains * * Revmap data, used internally by irq_domain @@ -171,6 +173,7 @@ struct irq_domain { struct fwnode_handle *fwnode; enum irq_domain_bus_token bus_token; struct irq_domain_chip_generic *gc; + struct device *dev; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_domain *parent; #endif @@ -226,6 +229,13 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) return to_of_node(d->fwnode); } +static inline void irq_domain_set_pm_device(struct irq_domain *d, + struct device *dev) +{ + if (d) + d->dev = dev; +} + #ifdef CONFIG_IRQ_DOMAIN struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, const char *name, phys_addr_t *pa); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index c09324663088..a2a12cdbe872 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -1558,6 +1558,17 @@ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) return 0; } +static struct device *irq_get_parent_device(struct irq_data *data) +{ + if (data->chip->parent_device) + return data->chip->parent_device; + + if (data->domain) + return data->domain->dev; + + return NULL; +} + /** * irq_chip_pm_get - Enable power for an IRQ chip * @data: Pointer to interrupt specific data @@ -1567,12 +1578,13 @@ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) */ int irq_chip_pm_get(struct irq_data *data) { + struct device *dev = irq_get_parent_device(data); int retval; - if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) { - retval = pm_runtime_get_sync(data->chip->parent_device); + if (IS_ENABLED(CONFIG_PM) && dev) { + retval = pm_runtime_get_sync(dev); if (retval < 0) { - pm_runtime_put_noidle(data->chip->parent_device); + pm_runtime_put_noidle(dev); return retval; } } @@ -1590,10 +1602,11 @@ int irq_chip_pm_get(struct irq_data *data) */ int irq_chip_pm_put(struct irq_data *data) { + struct device *dev = irq_get_parent_device(data); int retval = 0; - if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) - retval = pm_runtime_put(data->chip->parent_device); + if (IS_ENABLED(CONFIG_PM) && dev) + retval = pm_runtime_put(dev); return (retval < 0) ? retval : 0; } -- cgit v1.2.3-71-gd317 From c306d737691ef84305d4ed0d302c63db2932f0bb Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 25 Jan 2022 15:57:45 -0500 Subject: NFSD: Deprecate NFS_OFFSET_MAX NFS_OFFSET_MAX was introduced way back in Linux v2.3.y before there was a kernel-wide OFFSET_MAX value. As a clean up, replace the last few uses of it with its generic equivalent, and get rid of it. 
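The substitution is value-preserving: both macros name the largest positive signed 64-bit offset. A quick userspace sketch of the equivalence (not kernel code; it assumes the 64-bit loff_t used on all supported configurations):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* NFS_OFFSET_MAX exactly as include/linux/nfs.h defined it */
        int64_t nfs_offset_max = (int64_t)(~(uint64_t)0 >> 1);

        /* OFFSET_MAX is the maximum value of the signed 64-bit loff_t */
        int64_t offset_max = INT64_MAX;

        assert(nfs_offset_max == offset_max); /* both 0x7fffffffffffffff */
        return 0;
    }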
Signed-off-by: Chuck Lever --- fs/nfsd/nfs3xdr.c | 2 +- fs/nfsd/nfs4xdr.c | 2 +- include/linux/nfs.h | 8 -------- 3 files changed, 2 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 2e47a07029f1..0293b8d65f10 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -1060,7 +1060,7 @@ svcxdr_encode_entry3_common(struct nfsd3_readdirres *resp, const char *name, return false; /* cookie */ resp->cookie_offset = dirlist->len; - if (xdr_stream_encode_u64(xdr, NFS_OFFSET_MAX) < 0) + if (xdr_stream_encode_u64(xdr, OFFSET_MAX) < 0) return false; return true; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index f5e3430bb6ff..714a3a3bd50c 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3495,7 +3495,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, p = xdr_reserve_space(xdr, 3*4 + namlen); if (!p) goto fail; - p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */ + p = xdr_encode_hyper(p, OFFSET_MAX); /* offset of next entry */ p = xdr_encode_array(p, name, namlen); /* name length & name */ nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen); diff --git a/include/linux/nfs.h b/include/linux/nfs.h index 0dc7ad38a0da..b06375e88e58 100644 --- a/include/linux/nfs.h +++ b/include/linux/nfs.h @@ -36,14 +36,6 @@ static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *sourc memcpy(target->data, source->data, source->size); } - -/* - * This is really a general kernel constant, but since nothing like - * this is defined in the kernel headers, I have to do it here. - */ -#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1)) - - enum nfs3_stable_how { NFS_UNSTABLE = 0, NFS_DATA_SYNC = 1, -- cgit v1.2.3-71-gd317 From 70e038f89b467995708207fb57bbf46aec32dc2c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 16 Dec 2021 12:16:42 +0100 Subject: mtd: nand: mxic-ecc: Support SPI pipelined mode Introduce the support for another possible configuration: the ECC engine may work as DMA master (pipelined) and move the data to/from the NAND chip into the buffer by itself, applying the necessary corrections/computations on the fly. This driver offers an ECC engine implementation that must be instantiated from a SPI controller driver.
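As a rough sketch of the intended call flow, a host controller driver could drive a page read like this (the host_read_page() wrapper is hypothetical; locking and error unwinding are trimmed):

    #include <linux/mtd/nand.h>
    #include <linux/mtd/nand-ecc-mxic.h>

    static int host_read_page(struct nand_device *nand,
                              struct nand_page_io_req *req,
                              dma_addr_t dirmap)
    {
        struct nand_ecc_engine *eng = nand->ecc.engine;
        int ret;

        /* Configures the engine (buffers, chunk count, ...) */
        ret = eng->ops->prepare_io_req(nand, req);
        if (ret)
            return ret;

        /* The engine masters the DMA and corrects data on the fly */
        ret = mxic_ecc_process_data_pipelined(eng, NAND_PAGE_READ, dirmap);
        if (ret)
            return ret;

        /* Extracts the status bytes and reports bitflips or -EBADMSG */
        return eng->ops->finish_io_req(nand, req);
    }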
Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20211216111654.238086-17-miquel.raynal@bootlin.com --- drivers/mtd/nand/ecc-mxic.c | 186 +++++++++++++++++++++++++++++++++++++- include/linux/mtd/nand-ecc-mxic.h | 49 ++++++++++ 2 files changed, 234 insertions(+), 1 deletion(-) create mode 100644 include/linux/mtd/nand-ecc-mxic.h (limited to 'include/linux') diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c index 3e8fbe62547e..c122139255e5 100644 --- a/drivers/mtd/nand/ecc-mxic.c +++ b/drivers/mtd/nand/ecc-mxic.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -40,7 +41,9 @@ #define INTRPT_SIG_EN 0x0C /* Host Controller Configuration */ #define HC_CONFIG 0x10 +#define DEV2MEM 0 /* TRANS_TYP_DMA in the spec */ #define MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */ +#define MAPPING BIT(5) /* TRANS_TYP_MAPPING in the spec */ #define ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */ #define ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */ #define BURST_TYP_FIXED 0 @@ -87,6 +90,7 @@ struct mxic_ecc_engine { int irq; struct completion complete; struct nand_ecc_engine external_engine; + struct nand_ecc_engine pipelined_engine; struct mutex lock; }; @@ -104,6 +108,7 @@ struct mxic_ecc_ctx { u8 *oobwithstat; struct scatterlist sg[2]; struct nand_page_io_req *req; + unsigned int pageoffs; }; static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng) @@ -111,11 +116,19 @@ static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng) return container_of(eng, struct mxic_ecc_engine, external_engine); } +static struct mxic_ecc_engine *pip_ecc_eng_to_mxic(struct nand_ecc_engine *eng) +{ + return container_of(eng, struct mxic_ecc_engine, pipelined_engine); +} + static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand) { struct nand_ecc_engine *eng = nand->ecc.engine; - return ext_ecc_eng_to_mxic(eng); + if (eng->integration == NAND_ECC_ENGINE_INTEGRATION_EXTERNAL) + return ext_ecc_eng_to_mxic(eng); + else + return pip_ecc_eng_to_mxic(eng); } static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section, @@ -364,6 +377,38 @@ static int mxic_ecc_init_ctx_external(struct nand_device *nand) return 0; } +static int mxic_ecc_init_ctx_pipelined(struct nand_device *nand) +{ + struct mxic_ecc_engine *mxic = nand_to_mxic(nand); + struct mxic_ecc_ctx *ctx; + struct device *dev; + int ret; + + dev = nand_ecc_get_engine_dev(nand->ecc.engine->dev); + if (!dev) + return -EINVAL; + + dev_info(dev, "Macronix ECC engine in pipelined/mapping mode\n"); + + ret = mxic_ecc_init_ctx(nand, dev); + if (ret) + return ret; + + ctx = nand_to_ecc_ctx(nand); + + /* All steps should be handled in one go directly by the internal DMA */ + writel(ctx->steps, mxic->regs + CHUNK_CNT); + + /* + * Interleaved ECC scheme cannot be used otherwise factory bad block + * markers would be lost. A packed layout is mandatory. 
+ */ + writel(BURST_TYP_INCREASING | ECC_PACKED | MAPPING, + mxic->regs + HC_CONFIG); + + return 0; +} + static void mxic_ecc_cleanup_ctx(struct nand_device *nand) { struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand); @@ -419,6 +464,18 @@ static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic, return ret; } +int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng, + unsigned int direction, dma_addr_t dirmap) +{ + struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng); + + if (dirmap) + writel(dirmap, mxic->regs + HC_SLV_ADDR); + + return mxic_ecc_process_data(mxic, direction); +} +EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined); + static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx) { u8 *buf = ctx->oobwithstat; @@ -592,6 +649,11 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand, dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL); + if (ret) { + nand_ecc_restore_req(&ctx->req_ctx, req); + return ret; + } + /* Extract the status bytes and reconstruct the buffer */ mxic_ecc_extract_status_bytes(ctx); mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat); @@ -601,6 +663,65 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand, return mxic_ecc_count_biterrs(mxic, nand); } +/* Pipelined ECC engine helpers */ +static int mxic_ecc_prepare_io_req_pipelined(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct mxic_ecc_engine *mxic = nand_to_mxic(nand); + struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand); + int nents; + + if (req->mode == MTD_OPS_RAW) + return 0; + + nand_ecc_tweak_req(&ctx->req_ctx, req); + ctx->req = req; + + /* Copy the OOB buffer and add room for the ECC engine status bytes */ + mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in); + + sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen); + sg_set_buf(&ctx->sg[1], ctx->oobwithstat, + req->ooblen + (ctx->steps * STAT_BYTES)); + + nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL); + if (!nents) + return -EINVAL; + + mutex_lock(&mxic->lock); + + writel(sg_dma_address(&ctx->sg[0]), mxic->regs + SDMA_MAIN_ADDR); + writel(sg_dma_address(&ctx->sg[1]), mxic->regs + SDMA_SPARE_ADDR); + + return 0; +} + +static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct mxic_ecc_engine *mxic = nand_to_mxic(nand); + struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand); + int ret = 0; + + if (req->mode == MTD_OPS_RAW) + return 0; + + mutex_unlock(&mxic->lock); + + dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL); + + if (req->type == NAND_PAGE_READ) { + mxic_ecc_extract_status_bytes(ctx); + mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, + ctx->oobwithstat); + ret = mxic_ecc_count_biterrs(mxic, nand); + } + + nand_ecc_restore_req(&ctx->req_ctx, req); + + return ret; +} + static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = { .init_ctx = mxic_ecc_init_ctx_external, .cleanup_ctx = mxic_ecc_cleanup_ctx, @@ -608,6 +729,69 @@ static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = { .finish_io_req = mxic_ecc_finish_io_req_external, }; +static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = { + .init_ctx = mxic_ecc_init_ctx_pipelined, + .cleanup_ctx = mxic_ecc_cleanup_ctx, + .prepare_io_req = mxic_ecc_prepare_io_req_pipelined, + .finish_io_req = mxic_ecc_finish_io_req_pipelined, +}; + +struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void) +{ + return &mxic_ecc_engine_pipelined_ops; +} 
+EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops); + +static struct platform_device * +mxic_ecc_get_pdev(struct platform_device *spi_pdev) +{ + struct platform_device *eng_pdev; + struct device_node *np; + + /* Retrieve the nand-ecc-engine phandle */ + np = of_parse_phandle(spi_pdev->dev.of_node, "nand-ecc-engine", 0); + if (!np) + return NULL; + + /* Jump to the engine's device node */ + eng_pdev = of_find_device_by_node(np); + of_node_put(np); + + return eng_pdev; +} + +void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng) +{ + struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng); + + platform_device_put(to_platform_device(mxic->dev)); +} +EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine); + +struct nand_ecc_engine * +mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev) +{ + struct platform_device *eng_pdev; + struct mxic_ecc_engine *mxic; + + eng_pdev = mxic_ecc_get_pdev(spi_pdev); + if (!eng_pdev) + return ERR_PTR(-ENODEV); + + mxic = platform_get_drvdata(eng_pdev); + if (!mxic) { + platform_device_put(eng_pdev); + return ERR_PTR(-EPROBE_DEFER); + } + + return &mxic->pipelined_engine; +} +EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine); + +/* + * Only the external ECC engine is exported as the pipelined is SoC specific, so + * it is registered directly by the drivers that wrap it. + */ static int mxic_ecc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h new file mode 100644 index 000000000000..f3aa1ac82aed --- /dev/null +++ b/include/linux/mtd/nand-ecc-mxic.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2019 Macronix + * Author: Miquèl Raynal + * + * Header for the Macronix external ECC engine. + */ + +#ifndef __MTD_NAND_ECC_MXIC_H__ +#define __MTD_NAND_ECC_MXIC_H__ + +#include +#include + +struct mxic_ecc_engine; + +#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) + +struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void); +struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev); +void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng); +int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng, + unsigned int direction, dma_addr_t dirmap); + +#else /* !CONFIG_MTD_NAND_ECC_MXIC */ + +static inline struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void) +{ + return NULL; +} + +static inline struct nand_ecc_engine * +mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng) {} + +static inline int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng, + unsigned int direction, + dma_addr_t dirmap) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_MTD_NAND_ECC_MXIC */ + +#endif /* __MTD_NAND_ECC_MXIC_H__ */ -- cgit v1.2.3-71-gd317 From 4a3cc7fb6e63bcfdedec25364738f1493345bd20 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 27 Jan 2022 10:17:56 +0100 Subject: spi: spi-mem: Introduce a capability structure Create a spi_controller_mem_caps structure and put it within the spi_controller structure close to the spi_controller_mem_ops structure. So far the only field in this structure is the support for dtr operations, but soon we will add another parameter. Also create a helper to parse the capabilities and check if the requested capability has been set or not.
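For illustration, this is how a controller driver would typically advertise its capabilities (the "foo" controller, its private struct and its mem_ops are made up for this sketch):

    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>
    #include <linux/spi/spi-mem.h>

    struct foo_priv { int dummy; };

    static const struct spi_controller_mem_ops foo_mem_ops = {
        /* .exec_op etc. elided */
    };

    static const struct spi_controller_mem_caps foo_mem_caps = {
        .dtr = true,
    };

    static int foo_probe(struct platform_device *pdev)
    {
        struct spi_controller *ctlr;

        ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
        if (!ctlr)
            return -ENOMEM;

        ctlr->mem_ops = &foo_mem_ops;
        ctlr->mem_caps = &foo_mem_caps;

        return devm_spi_register_controller(&pdev->dev, ctlr);
    }

The helper is deliberately NULL-safe: spi_mem_controller_is_capable(ctlr, dtr) expands to ((ctlr)->mem_caps && (ctlr)->mem_caps->dtr), so a controller that never sets ->mem_caps simply reports no capabilities.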
Signed-off-by: Miquel Raynal Reviewed-by: Pratyush Yadav Reviewed-by: Boris Brezillon Reviewed-by: Tudor Ambarus Reviewed-by: Mark Brown Link: https://lore.kernel.org/linux-mtd/20220127091808.1043392-2-miquel.raynal@bootlin.com --- include/linux/spi/spi-mem.h | 11 +++++++++++ include/linux/spi/spi.h | 3 +++ 2 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 85e2ff7b840d..38e5d45c9842 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -285,6 +285,17 @@ struct spi_controller_mem_ops { unsigned long timeout_ms); }; +/** + * struct spi_controller_mem_caps - SPI memory controller capabilities + * @dtr: Supports DTR operations + */ +struct spi_controller_mem_caps { + bool dtr; +}; + +#define spi_mem_controller_is_capable(ctlr, cap) \ + ((ctlr)->mem_caps && (ctlr)->mem_caps->cap) + /** * struct spi_mem_driver - SPI memory driver * @spidrv: inherit from a SPI driver diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 7ab3fed7b804..cf99a1ee0e74 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -23,6 +23,7 @@ struct ptp_system_timestamp; struct spi_controller; struct spi_transfer; struct spi_controller_mem_ops; +struct spi_controller_mem_caps; /* * INTERFACES between SPI master-side drivers and SPI slave protocol handlers, @@ -415,6 +416,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @mem_ops: optimized/dedicated operations for interactions with SPI memory. * This field is optional and should only be implemented if the * controller has native support for memory like operations. + * @mem_caps: controller capabilities for the handling of memory operations. * @unprepare_message: undo any work done by prepare_message(). * @slave_abort: abort the ongoing transfer request on an SPI slave controller * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per @@ -639,6 +641,7 @@ struct spi_controller { /* Optimized handlers for SPI memory-like operations. */ const struct spi_controller_mem_ops *mem_ops; + const struct spi_controller_mem_caps *mem_caps; /* gpio chip select */ int *cs_gpios; -- cgit v1.2.3-71-gd317 From 9a15efc5d5e6b5beaed0883e5bdcd0b1384c1b20 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 27 Jan 2022 10:18:00 +0100 Subject: spi: spi-mem: Kill the spi_mem_dtr_supports_op() helper Now that spi_mem_default_supports_op() has access to the static controller capabilities (relating to memory operations), and now that these capabilities have been filled by the relevant controllers, there is no need for a specific helper checking only DTR operations, so let's just kill spi_mem_dtr_supports_op() and simply use spi_mem_default_supports_op() instead. 
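The net effect is that the DTR sanity checks now live in one place. A simplified sketch of the consolidated check the default helper performs after this series (approximate; spi_mem_check_buswidth() is internal to spi-mem.c):

    static bool default_supports_op_sketch(struct spi_mem *mem,
                                           const struct spi_mem_op *op)
    {
        struct spi_controller *ctlr = mem->spi->controller;

        if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr) {
            /* DTR only passes if the controller advertised the cap... */
            if (!spi_mem_controller_is_capable(ctlr, dtr))
                return false;

            /* ...and DTR commands are always two bytes wide */
            if (op->cmd.nbytes != 2)
                return false;
        }

        return spi_mem_check_buswidth(mem, op);
    }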
Signed-off-by: Miquel Raynal Reviewed-by: Pratyush Yadav Reviewed-by: Boris Brezillon Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/linux-mtd/20220127091808.1043392-6-miquel.raynal@bootlin.com --- drivers/spi/spi-cadence-quadspi.c | 5 +---- drivers/spi/spi-mem.c | 10 ---------- drivers/spi/spi-mxic.c | 10 +--------- include/linux/spi/spi-mem.h | 11 ----------- 4 files changed, 2 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 455b90d1feed..b0c9f62ccefb 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1441,10 +1441,7 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem, if (!(all_true || all_false)) return false; - if (all_true) - return spi_mem_dtr_supports_op(mem, op); - else - return spi_mem_default_supports_op(mem, op); + return spi_mem_default_supports_op(mem, op); } static int cqspi_of_get_flash_pdata(struct platform_device *pdev, diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 86e6597bc3dc..ed966d8129eb 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -160,16 +160,6 @@ static bool spi_mem_check_buswidth(struct spi_mem *mem, return true; } -bool spi_mem_dtr_supports_op(struct spi_mem *mem, - const struct spi_mem_op *op) -{ - if (op->cmd.nbytes != 2) - return false; - - return spi_mem_check_buswidth(mem, op); -} -EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op); - bool spi_mem_default_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 9952fcdf3627..6bec0a7c77d3 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -335,8 +335,6 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf, static bool mxic_spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { - bool all_false; - if (op->data.buswidth > 8 || op->addr.buswidth > 8 || op->dummy.buswidth > 8 || op->cmd.buswidth > 8) return false; @@ -348,13 +346,7 @@ static bool mxic_spi_mem_supports_op(struct spi_mem *mem, if (op->addr.nbytes > 7) return false; - all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr && - !op->data.dtr; - - if (all_false) - return spi_mem_default_supports_op(mem, op); - else - return spi_mem_dtr_supports_op(mem, op); + return spi_mem_default_supports_op(mem, op); } static int mxic_spi_mem_exec_op(struct spi_mem *mem, diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 38e5d45c9842..4a1bfe689872 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -330,10 +330,6 @@ void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, bool spi_mem_default_supports_op(struct spi_mem *mem, const struct spi_mem_op *op); - -bool spi_mem_dtr_supports_op(struct spi_mem *mem, - const struct spi_mem_op *op); - #else static inline int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, @@ -356,13 +352,6 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, { return false; } - -static inline -bool spi_mem_dtr_supports_op(struct spi_mem *mem, - const struct spi_mem_op *op) -{ - return false; -} #endif /* CONFIG_SPI_MEM */ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); -- cgit v1.2.3-71-gd317 From a433c2cbd75ab76f277364f44e76f32c7df306e7 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 27 Jan 2022 10:18:01 +0100 Subject: spi: spi-mem: Add an ecc parameter to the spi_mem_op structure Soon the SPI-NAND core will 
need a way to request a SPI controller to enable ECC support for a given operation. This is because of the pipelined integration of certain ECC engines, which are directly managed by the SPI controller itself. Introduce an additional spi_mem_op field for this purpose: ecc. So far this field is left unset, and all the SPI controller drivers verify it is false in their ->supports_op() hook, as they all call spi_mem_default_supports_op(). Signed-off-by: Miquel Raynal Acked-by: Pratyush Yadav Reviewed-by: Boris Brezillon Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/linux-mtd/20220127091808.1043392-7-miquel.raynal@bootlin.com --- drivers/spi/spi-mem.c | 5 +++++ include/linux/spi/spi-mem.h | 4 ++++ 2 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index ed966d8129eb..f38ac31961c9 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -178,6 +178,11 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, return false; } + if (op->data.ecc) { + if (!spi_mem_controller_is_capable(ctlr, ecc)) + return false; + } + return spi_mem_check_buswidth(mem, op); } EXPORT_SYMBOL_GPL(spi_mem_default_supports_op); diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 4a1bfe689872..2ba044d0d5e5 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -89,6 +89,7 @@ enum spi_mem_data_dir { * @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not * @data.buswidth: number of IO lanes used to send/receive the data * @data.dtr: whether the data should be sent in DTR mode or not + * @data.ecc: whether error correction is required or not * @data.dir: direction of the transfer * @data.nbytes: number of data bytes to send/receive. Can be zero if the * operation does not involve transferring data @@ -119,6 +120,7 @@ struct spi_mem_op { struct { u8 buswidth; u8 dtr : 1; + u8 ecc : 1; enum spi_mem_data_dir dir; unsigned int nbytes; union { @@ -288,9 +290,11 @@ struct spi_controller_mem_ops { /** * struct spi_controller_mem_caps - SPI memory controller capabilities * @dtr: Supports DTR operations + * @ecc: Supports operations with error correction */ struct spi_controller_mem_caps { bool dtr; + bool ecc; }; #define spi_mem_controller_is_capable(ctlr, cap) \ -- cgit v1.2.3-71-gd317 From f9d7c7265bcff7d9a17425a8cddf702e8fe159c2 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 27 Jan 2022 10:18:03 +0100 Subject: mtd: spinand: Create direct mapping descriptors for ECC operations In order for pipelined ECC engines to be able to enable/disable the ECC engine only when needed, and to avoid races once parallel operations are supported in the future, we need to provide the information about the use of the ECC engine in the direct mapping hooks. As direct mapping configurations are meant to be static, it is best to create two new mappings: one for regular 'raw' accesses and one for accesses involving correction. It is up to the driver to decide whether to use the new ECC enable boolean contained in the spi-mem operation. As dirmaps are not free (they consume a few pages of MMIO address space) and because these extra entries are only meant to be used by pipelined engines, let's limit their use to this specific type of engine and save a bit of memory with all the other setups.
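Since a dirmap descriptor freezes its operation template at creation time, the ecc flag cannot be flipped per access, hence one descriptor per flavour. A condensed sketch of the creation step (offset/length setup and error checks elided; names follow the hunks below):

    /* Condensed from spinand_create_dirmap() */
    struct spi_mem_dirmap_info info = { /* offset/length setup elided */ };
    struct device *dev = &spinand->spimem->spi->dev;

    /* Raw read path: the frozen template has data.ecc == false */
    info.op_tmpl = *spinand->op_templates.read_cache;
    spinand->dirmaps[plane].rdesc =
        devm_spi_mem_dirmap_create(dev, spinand->spimem, &info);

    /* Corrected read path: same template with data.ecc baked in */
    info.op_tmpl.data.ecc = true;
    spinand->dirmaps[plane].rdesc_ecc =
        devm_spi_mem_dirmap_create(dev, spinand->spimem, &info);

Non-pipelined setups simply alias wdesc_ecc/rdesc_ecc to the raw descriptors, which is the memory saving mentioned above.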
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20220127091808.1043392-9-miquel.raynal@bootlin.com --- drivers/mtd/nand/spi/core.c | 35 +++++++++++++++++++++++++++++++++-- include/linux/mtd/spinand.h | 2 ++ 2 files changed, 35 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index bb6b026b558b..ff8336870bc0 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -381,7 +381,10 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, } } - rdesc = spinand->dirmaps[req->pos.plane].rdesc; + if (req->mode == MTD_OPS_RAW) + rdesc = spinand->dirmaps[req->pos.plane].rdesc; + else + rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc; while (nbytes) { ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf); @@ -452,7 +455,10 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, req->ooblen); } - wdesc = spinand->dirmaps[req->pos.plane].wdesc; + if (req->mode == MTD_OPS_RAW) + wdesc = spinand->dirmaps[req->pos.plane].wdesc; + else + wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc; while (nbytes) { ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf); @@ -865,6 +871,31 @@ static int spinand_create_dirmap(struct spinand_device *spinand, spinand->dirmaps[plane].rdesc = desc; + if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) { + spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc; + spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc; + + return 0; + } + + info.op_tmpl = *spinand->op_templates.update_cache; + info.op_tmpl.data.ecc = true; + desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, + spinand->spimem, &info); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + spinand->dirmaps[plane].wdesc_ecc = desc; + + info.op_tmpl = *spinand->op_templates.read_cache; + info.op_tmpl.data.ecc = true; + desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev, + spinand->spimem, &info); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + spinand->dirmaps[plane].rdesc_ecc = desc; + return 0; } diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 6988956b8492..3aa28240a77f 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -389,6 +389,8 @@ struct spinand_info { struct spinand_dirmap { struct spi_mem_dirmap_desc *wdesc; struct spi_mem_dirmap_desc *rdesc; + struct spi_mem_dirmap_desc *wdesc_ecc; + struct spi_mem_dirmap_desc *rdesc_ecc; }; /** -- cgit v1.2.3-71-gd317 From 00360ebae483e603d55ec9a7231b787cb80ffe13 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 2 Feb 2022 15:45:36 +0100 Subject: spi: mxic: Add support for pipelined ECC operations Some SPI-NAND chips do not have a proper on-die ECC engine providing error correction/detection. This is particularly an issue on embedded devices with limited resources because all the computations must happen in software, unless an external hardware engine is provided. These hardware engines are new and fall into two categories: external or pipelined. Macronix is providing both, the former being already supported. The latter, however, is highly SoC-implementation-dependent and must be instantiated by the SPI host controller directly. An entire subsystem has been contributed to support these engines, which makes the insertion into another subsystem such as SPI quite straightforward without the need for a lot of specific functions.
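One design point worth noting: the engine lookup keeps the controller usable without ECC, and only a probe deferral aborts the controller probe. A condensed view of the contract, mirroring mxic_spi_probe() in the diff below:

    ret = mxic_spi_mem_ecc_probe(pdev, mxic);
    if (ret == -EPROBE_DEFER) {
        /* Engine declared in DT but not bound yet: retry probe later */
        pm_runtime_disable(&pdev->dev);
        return ret;
    }
    /*
     * Any other error (-EOPNOTSUPP, -ENODEV, ...) just means there is
     * no pipelined ECC; plain accesses keep using memcpy_{to,from}io().
     */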
Signed-off-by: Miquel Raynal Reviewed-by: Mark Brown Link: https://lore.kernel.org/linux-mtd/20220202144536.393792-1-miquel.raynal@bootlin.com --- drivers/spi/Kconfig | 1 + drivers/spi/spi-mxic.c | 113 +++++++++++++++++++++++++++++++++++++- include/linux/mtd/nand-ecc-mxic.h | 2 +- include/linux/mtd/nand.h | 15 +++++ 4 files changed, 128 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index b2a8821971e1..269613b03e40 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -879,6 +879,7 @@ config SPI_SYNQUACER config SPI_MXIC tristate "Macronix MX25F0A SPI controller" depends on SPI_MASTER + imply MTD_NAND_ECC_MXIC help This selects the Macronix MX25F0A SPI controller driver. diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 0d3390964c6f..55c092069301 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include #include @@ -167,6 +169,7 @@ #define HW_TEST(x) (0xe0 + ((x) * 4)) struct mxic_spi { + struct device *dev; struct clk *ps_clk; struct clk *send_clk; struct clk *send_dly_clk; @@ -177,6 +180,12 @@ struct mxic_spi { dma_addr_t dma; size_t size; } linear; + + struct { + bool use_pipelined_conf; + struct nand_ecc_engine *pipelined_engine; + void *ctx; + } ecc; }; static int mxic_spi_clk_enable(struct mxic_spi *mxic) @@ -400,7 +409,15 @@ static ssize_t mxic_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, LMODE_EN, mxic->regs + LRD_CTRL); - memcpy_fromio(buf, mxic->linear.map, len); + if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) { + ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine, + NAND_PAGE_READ, + mxic->linear.dma + offs); + if (ret) + return ret; + } else { + memcpy_fromio(buf, mxic->linear.map, len); + } writel(INT_LRD_DIS, mxic->regs + INT_STS); writel(0, mxic->regs + LRD_CTRL); @@ -436,7 +453,15 @@ static ssize_t mxic_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, LMODE_EN, mxic->regs + LWR_CTRL); - memcpy_toio(mxic->linear.map, buf, len); + if (mxic->ecc.use_pipelined_conf && desc->info.op_tmpl.data.ecc) { + ret = mxic_ecc_process_data_pipelined(mxic->ecc.pipelined_engine, + NAND_PAGE_WRITE, + mxic->linear.dma + offs); + if (ret) + return ret; + } else { + memcpy_toio(mxic->linear.map, buf, len); + } writel(INT_LWR_DIS, mxic->regs + INT_STS); writel(0, mxic->regs + LWR_CTRL); @@ -547,6 +572,7 @@ static const struct spi_controller_mem_ops mxic_spi_mem_ops = { static const struct spi_controller_mem_caps mxic_spi_mem_caps = { .dtr = true, + .ecc = true, }; static void mxic_spi_set_cs(struct spi_device *spi, bool lvl) @@ -611,6 +637,80 @@ static int mxic_spi_transfer_one(struct spi_master *master, return 0; } +/* ECC wrapper */ +static int mxic_spi_mem_ecc_init_ctx(struct nand_device *nand) +{ + struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); + struct mxic_spi *mxic = nand->ecc.engine->priv; + + mxic->ecc.use_pipelined_conf = true; + + return ops->init_ctx(nand); +} + +static void mxic_spi_mem_ecc_cleanup_ctx(struct nand_device *nand) +{ + struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); + struct mxic_spi *mxic = nand->ecc.engine->priv; + + mxic->ecc.use_pipelined_conf = false; + + ops->cleanup_ctx(nand); +} + +static int mxic_spi_mem_ecc_prepare_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); + + return ops->prepare_io_req(nand, req); +} + +static 
int mxic_spi_mem_ecc_finish_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct nand_ecc_engine_ops *ops = mxic_ecc_get_pipelined_ops(); + + return ops->finish_io_req(nand, req); +} + +static struct nand_ecc_engine_ops mxic_spi_mem_ecc_engine_pipelined_ops = { + .init_ctx = mxic_spi_mem_ecc_init_ctx, + .cleanup_ctx = mxic_spi_mem_ecc_cleanup_ctx, + .prepare_io_req = mxic_spi_mem_ecc_prepare_io_req, + .finish_io_req = mxic_spi_mem_ecc_finish_io_req, +}; + +static void mxic_spi_mem_ecc_remove(struct mxic_spi *mxic) +{ + if (mxic->ecc.pipelined_engine) { + mxic_ecc_put_pipelined_engine(mxic->ecc.pipelined_engine); + nand_ecc_unregister_on_host_hw_engine(mxic->ecc.pipelined_engine); + } +} + +static int mxic_spi_mem_ecc_probe(struct platform_device *pdev, + struct mxic_spi *mxic) +{ + struct nand_ecc_engine *eng; + + if (!mxic_ecc_get_pipelined_ops()) + return -EOPNOTSUPP; + + eng = mxic_ecc_get_pipelined_engine(pdev); + if (IS_ERR(eng)) + return PTR_ERR(eng); + + eng->dev = &pdev->dev; + eng->integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED; + eng->ops = &mxic_spi_mem_ecc_engine_pipelined_ops; + eng->priv = mxic; + mxic->ecc.pipelined_engine = eng; + nand_ecc_register_on_host_hw_engine(eng); + + return 0; +} + static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); @@ -656,6 +756,7 @@ static int mxic_spi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, master); mxic = spi_master_get_devdata(master); + mxic->dev = &pdev->dev; master->dev.of_node = pdev->dev.of_node; @@ -702,6 +803,12 @@ static int mxic_spi_probe(struct platform_device *pdev) mxic_spi_hw_init(mxic); + ret = mxic_spi_mem_ecc_probe(pdev, mxic); + if (ret == -EPROBE_DEFER) { + pm_runtime_disable(&pdev->dev); + return ret; + } + ret = spi_register_master(master); if (ret) { dev_err(&pdev->dev, "spi_register_master failed\n"); @@ -714,8 +821,10 @@ static int mxic_spi_probe(struct platform_device *pdev) static int mxic_spi_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); + struct mxic_spi *mxic = spi_master_get_devdata(master); pm_runtime_disable(&pdev->dev); + mxic_spi_mem_ecc_remove(mxic); spi_unregister_master(master); return 0; diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h index f3aa1ac82aed..b125926e458c 100644 --- a/include/linux/mtd/nand-ecc-mxic.h +++ b/include/linux/mtd/nand-ecc-mxic.h @@ -14,7 +14,7 @@ struct mxic_ecc_engine; -#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) +#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE) struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void); struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 615b3e3a3920..c3693bb87b4c 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -303,8 +303,23 @@ int nand_ecc_prepare_io_req(struct nand_device *nand, int nand_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req); bool nand_ecc_is_strong_enough(struct nand_device *nand); + +#if IS_REACHABLE(CONFIG_MTD_NAND_CORE) int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine); int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine); +#else +static inline int +nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine) +{ + return -ENOTSUPP; +} +static inline int 
+nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine) +{ + return -ENOTSUPP; +} +#endif + struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand); struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand); struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand); -- cgit v1.2.3-71-gd317 From beb0622138cd2848dec06b0651a988c39d099574 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 1 Feb 2022 12:03:10 +0000 Subject: genirq: Kill irq_chip::parent_device Now that no one is using irq_chip::parent_device in the tree, get rid of it. Signed-off-by: Marc Zyngier Acked-by: Bartosz Golaszewski Link: https://lore.kernel.org/r/20220201120310.878267-13-maz@kernel.org --- include/linux/irq.h | 2 -- kernel/irq/chip.c | 3 --- 2 files changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index 848e1e12c5c6..2cb2e2ac2703 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -456,7 +456,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) /** * struct irq_chip - hardware interrupt chip descriptor * - * @parent_device: pointer to parent device for irqchip * @name: name for /proc/interrupts * @irq_startup: start up the interrupt (defaults to ->enable if NULL) * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) @@ -503,7 +502,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @flags: chip specific flags */ struct irq_chip { - struct device *parent_device; const char *name; unsigned int (*irq_startup)(struct irq_data *data); void (*irq_shutdown)(struct irq_data *data); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index a2a12cdbe872..24b6f2b40e5e 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -1560,9 +1560,6 @@ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) static struct device *irq_get_parent_device(struct irq_data *data) { - if (data->chip->parent_device) - return data->chip->parent_device; - if (data->domain) return data->domain->dev; -- cgit v1.2.3-71-gd317 From 27c196c7b73cb70bbed3a9df46563bab60e63415 Mon Sep 17 00:00:00 2001 From: Terry Bowman Date: Wed, 9 Feb 2022 11:27:09 -0600 Subject: kernel/resource: Introduce request_mem_region_muxed() Support for requesting a muxed memory region is implemented but not currently callable as a macro. Add the request_mem_region_muxed() macro. MMIO accesses can be synchronized using request_mem_region() which is already available. This call will return failure if the resource is busy. The 'muxed' version of this macro will handle a busy resource by using a wait queue to retry until the resource is available.
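A minimal usage sketch (addresses and the "foo" name are placeholders); note that release_mem_region() is what wakes up other waiters queued on a muxed resource:

    #include <linux/io.h>
    #include <linux/ioport.h>

    #define FOO_MMIO_BASE 0xfed80000
    #define FOO_MMIO_SIZE 0x100

    static int foo_touch_shared_regs(void)
    {
        void __iomem *regs;

        /* May sleep on a wait queue until the shared region is free */
        if (!request_mem_region_muxed(FOO_MMIO_BASE, FOO_MMIO_SIZE, "foo"))
            return -EBUSY;

        regs = ioremap(FOO_MMIO_BASE, FOO_MMIO_SIZE);
        if (!regs) {
            release_mem_region(FOO_MMIO_BASE, FOO_MMIO_SIZE);
            return -ENOMEM;
        }

        /* ... program the device ... */

        iounmap(regs);
        release_mem_region(FOO_MMIO_BASE, FOO_MMIO_SIZE);
        return 0;
    }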
Signed-off-by: Terry Bowman Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- include/linux/ioport.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 8359c50f9988..ec5f71f7135b 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -262,6 +262,8 @@ resource_union(struct resource *r1, struct resource *r2, struct resource *r) #define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) #define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) +#define request_mem_region_muxed(start, n, name) \ + __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_MUXED) #define request_mem_region_exclusive(start,n,name) \ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) #define rename_region(region, newname) do { (region)->name = (newname); } while (0) -- cgit v1.2.3-71-gd317 From 8008e7902f28eb9e5459b21d375b3e5b4090efff Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 28 Jan 2022 13:17:09 +0530 Subject: soc: qcom: llcc: Update the logic for version info extraction LLCC HW version info is made up of major, branch, minor and echo version fields, each of which is 8 bits wide. Several features in newer LLCC HW depend on the full version rather than just the major or minor version: write-subcache enable is applicable to versions v2.0.0.0 and later, and the upcoming write-subcache cacheable for the SM8450 SoC is only present in versions v2.1.0.0 and later. It is therefore easier and cleaner to compare directly against the full version than to add separate major/branch/minor/echo version checks. So remove the earlier major version check and add a full version check for those features.
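With the full 32-bit word kept as-is, feature gating becomes a plain unsigned comparison. For illustration (the v2.1.0.0 constant is an assumed encoding following the same major/branch/minor/echo byte layout):

    #define LLCC_VERSION_2_0_0_0 0x02000000 /* from the hunk above */
    #define LLCC_VERSION_2_1_0_0 0x02010000 /* assumed: branch byte = 1 */

    /* write-subcache enable: v2.0.0.0 and later */
    if (drv_data->version >= LLCC_VERSION_2_0_0_0) {
        /* program the write-subcache enable register */
    }

    /* write-subcache cacheable (SM8450): v2.1.0.0 and later */
    if (drv_data->version >= LLCC_VERSION_2_1_0_0) {
        /* program the write-subcache cacheable register */
    }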
Signed-off-by: Sai Prakash Ranjan Tested-by: Vinod Koul Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/a82d7c32348c51fcc2b63e220d91b318bf706c83.1643355594.git.quic_saipraka@quicinc.com --- drivers/soc/qcom/llcc-qcom.c | 9 +++++---- include/linux/soc/qcom/llcc-qcom.h | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index c45146c63423..f15f4c51e997 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -37,7 +37,6 @@ #define CACHE_LINE_SIZE_SHIFT 6 #define LLCC_COMMON_HW_INFO 0x00030000 -#define LLCC_MAJOR_VERSION_MASK GENMASK(31, 24) #define LLCC_COMMON_STATUS0 0x0003000c #define LLCC_LB_CNT_MASK GENMASK(31, 28) @@ -55,6 +54,8 @@ #define BANK_OFFSET_STRIDE 0x80000 +#define LLCC_VERSION_2_0_0_0 0x02000000 + /** * struct llcc_slice_config - Data associated with the llcc slice * @usecase_id: Unique id for the client's use case @@ -504,7 +505,7 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config, return ret; } - if (drv_data->major_version == 2) { + if (drv_data->version >= LLCC_VERSION_2_0_0_0) { u32 wren; wren = config->write_scid_en << config->slice_id; @@ -598,12 +599,12 @@ static int qcom_llcc_probe(struct platform_device *pdev) goto err; } - /* Extract major version of the IP */ + /* Extract version of the IP */ ret = regmap_read(drv_data->bcast_regmap, LLCC_COMMON_HW_INFO, &version); if (ret) goto err; - drv_data->major_version = FIELD_GET(LLCC_MAJOR_VERSION_MASK, version); + drv_data->version = version; ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0, &num_banks); diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 9e8fd92c96b7..beecf00b707d 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -83,7 +83,7 @@ struct llcc_edac_reg_data { * @bitmap: Bit map to track the active slice ids * @offsets: Pointer to the bank offsets array * @ecc_irq: interrupt for llcc cache error detection and reporting - * @major_version: Indicates the LLCC major version + * @version: Indicates the LLCC version */ struct llcc_drv_data { struct regmap *regmap; @@ -96,7 +96,7 @@ struct llcc_drv_data { unsigned long *bitmap; u32 *offsets; int ecc_irq; - u32 major_version; + u32 version; }; #if IS_ENABLED(CONFIG_QCOM_LLCC) -- cgit v1.2.3-71-gd317 From a6e9d7ef252c44a4f33b4403cd367430697dd9be Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 28 Jan 2022 13:17:13 +0530 Subject: soc: qcom: llcc: Add configuration data for SM8450 SoC Add LLCC configuration data for SM8450 SoC. 
Signed-off-by: Sai Prakash Ranjan Tested-by: Vinod Koul Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/fec944cb8f2a4a70785903c6bfec629c6f31b6a4.1643355594.git.quic_saipraka@quicinc.com --- drivers/soc/qcom/llcc-qcom.c | 34 ++++++++++++++++++++++++++++++++++ include/linux/soc/qcom/llcc-qcom.h | 5 +++++ 2 files changed, 39 insertions(+) (limited to 'include/linux') diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index efe962b9a1fe..eecafeded56f 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -226,6 +226,32 @@ static const struct llcc_slice_config sm8350_data[] = { { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, }; +static const struct llcc_slice_config sm8450_data[] = { + {LLCC_CPUSS, 1, 3072, 1, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 }, + {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 }, + {LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_MODHW, 9, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_GPU, 12, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 1, 0 }, + {LLCC_MMUHWT, 13, 768, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 }, + {LLCC_DISP, 16, 4096, 2, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_MDMPNG, 21, 1024, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 }, + {LLCC_CVP, 28, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_MODPE, 29, 64, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0 }, + {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 }, + {LLCC_CVPFW, 17, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CPUSS1, 3, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CAMEXP0, 4, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CPUMTE, 23, 256, 1, 1, 0x0FFF, 0x0, 0, 0, 0, 0, 1, 0, 0 }, + {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 }, + {LLCC_CAMEXP1, 27, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 }, +}; + static const u32 llcc_v1_2_reg_offset[] = { [LLCC_COMMON_HW_INFO] = 0x00030000, [LLCC_COMMON_STATUS0] = 0x0003000c, @@ -285,6 +311,13 @@ static const struct qcom_llcc_config sm8350_cfg = { .reg_offset = llcc_v1_2_reg_offset, }; +static const struct qcom_llcc_config sm8450_cfg = { + .sct_data = sm8450_data, + .size = ARRAY_SIZE(sm8450_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v21_reg_offset, +}; + static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; /** @@ -713,6 +746,7 @@ static const struct of_device_id qcom_llcc_of_match[] = { { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg }, { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg }, { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg }, + { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfg }, { } }; diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index beecf00b707d..0bc21ee58fac 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -35,7 +35,12 @@ #define LLCC_WRCACHE 31 #define LLCC_CVPFW 32 #define LLCC_CPUSS1 33 +#define LLCC_CAMEXP0 34 +#define LLCC_CPUMTE 35 #define LLCC_CPUHWT 36 +#define LLCC_MDMCLAD2 37 +#define LLCC_CAMEXP1 38 +#define LLCC_AENPU 45 /** * struct 
llcc_slice_desc - Cache slice descriptor -- cgit v1.2.3-71-gd317 From 297565aa22cfa80ab0f88c3569693aea0b6afb6d Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Sat, 5 Feb 2022 16:23:45 +0100 Subject: lib/xor: make xor prototypes more friendly to compiler vectorization Modern compilers are perfectly capable of extracting parallelism from the XOR routines, provided that the prototypes reflect the nature of the input accurately, in particular, the fact that the input vectors are expected not to overlap. This is not documented explicitly, but is implied by the interchangeability of the various C routines, some of which use temporary variables while others don't: this means that these routines only behave identically for non-overlapping inputs. So let's decorate these input vectors with the __restrict modifier, which informs the compiler that there is no overlap. While at it, make the input-only vectors pointer-to-const as well. Tested-by: Nathan Chancellor Signed-off-by: Ard Biesheuvel Reviewed-by: Nick Desaulniers Link: https://github.com/ClangBuiltLinux/linux/issues/563 Signed-off-by: Herbert Xu --- arch/alpha/include/asm/xor.h | 53 ++++++++++++++------- arch/arm/include/asm/xor.h | 42 +++++++++++------ arch/arm64/include/asm/xor.h | 21 ++++++--- arch/arm64/lib/xor-neon.c | 46 ++++++++++++------- arch/ia64/include/asm/xor.h | 21 ++++++--- arch/powerpc/include/asm/xor_altivec.h | 25 +++++----- arch/powerpc/lib/xor_vmx.c | 28 ++++++++---- arch/powerpc/lib/xor_vmx.h | 27 +++++------ arch/powerpc/lib/xor_vmx_glue.c | 32 +++++++------ arch/s390/lib/xor.c | 21 ++++++--- arch/sparc/include/asm/xor_32.h | 21 ++++++--- arch/sparc/include/asm/xor_64.h | 42 +++++++++++------ arch/x86/include/asm/xor.h | 42 +++++++++++------ arch/x86/include/asm/xor_32.h | 42 +++++++++++------ arch/x86/include/asm/xor_avx.h | 21 ++++++--- include/asm-generic/xor.h | 84 ++++++++++++++++++++++------------ include/linux/raid/xor.h | 21 ++++++--- 17 files changed, 381 insertions(+), 208 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/include/asm/xor.h b/arch/alpha/include/asm/xor.h index 5aeb4fb3cb7c..e0de0c233ab9 100644 --- a/arch/alpha/include/asm/xor.h +++ b/arch/alpha/include/asm/xor.h @@ -5,24 +5,43 @@ * Optimized RAID-5 checksumming functions for alpha EV5 and EV6 */ -extern void xor_alpha_2(unsigned long, unsigned long *, unsigned long *); -extern void xor_alpha_3(unsigned long, unsigned long *, unsigned long *, - unsigned long *); -extern void xor_alpha_4(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); -extern void xor_alpha_5(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); +extern void +xor_alpha_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2); +extern void +xor_alpha_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3); +extern void +xor_alpha_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4); +extern void +xor_alpha_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5); -extern void xor_alpha_prefetch_2(unsigned long, unsigned long *, - unsigned long *); -extern void xor_alpha_prefetch_3(unsigned long, 
unsigned long *, - unsigned long *, unsigned long *); -extern void xor_alpha_prefetch_4(unsigned long, unsigned long *, - unsigned long *, unsigned long *, - unsigned long *); -extern void xor_alpha_prefetch_5(unsigned long, unsigned long *, - unsigned long *, unsigned long *, - unsigned long *, unsigned long *); +extern void +xor_alpha_prefetch_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2); +extern void +xor_alpha_prefetch_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3); +extern void +xor_alpha_prefetch_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4); +extern void +xor_alpha_prefetch_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5); asm(" \n\ .text \n\ diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h index aefddec79286..669cad5194d3 100644 --- a/arch/arm/include/asm/xor.h +++ b/arch/arm/include/asm/xor.h @@ -44,7 +44,8 @@ : "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4)) static void -xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_arm4regs_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { unsigned int lines = bytes / sizeof(unsigned long) / 4; register unsigned int a1 __asm__("r4"); @@ -64,8 +65,9 @@ xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_arm4regs_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { unsigned int lines = bytes / sizeof(unsigned long) / 4; register unsigned int a1 __asm__("r4"); @@ -86,8 +88,10 @@ xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_arm4regs_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { unsigned int lines = bytes / sizeof(unsigned long) / 2; register unsigned int a1 __asm__("r8"); @@ -105,8 +109,11 @@ xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_arm4regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_arm4regs_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { unsigned int lines = bytes / sizeof(unsigned long) / 2; register unsigned int a1 __asm__("r8"); @@ -146,7 +153,8 @@ static struct xor_block_template xor_block_arm4regs = { extern struct xor_block_template const xor_block_neon_inner; static void -xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_neon_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { if (in_interrupt()) { xor_arm4regs_2(bytes, p1, p2); @@ -158,8 +166,9 @@ xor_neon_2(unsigned long 
bytes, unsigned long *p1, unsigned long *p2) } static void -xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_neon_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { if (in_interrupt()) { xor_arm4regs_3(bytes, p1, p2, p3); @@ -171,8 +180,10 @@ xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_neon_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { if (in_interrupt()) { xor_arm4regs_4(bytes, p1, p2, p3, p4); @@ -184,8 +195,11 @@ xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_neon_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { if (in_interrupt()) { xor_arm4regs_5(bytes, p1, p2, p3, p4, p5); diff --git a/arch/arm64/include/asm/xor.h b/arch/arm64/include/asm/xor.h index 947f6a4f1aa0..befcd8a7abc9 100644 --- a/arch/arm64/include/asm/xor.h +++ b/arch/arm64/include/asm/xor.h @@ -16,7 +16,8 @@ extern struct xor_block_template const xor_block_inner_neon; static void -xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_neon_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { kernel_neon_begin(); xor_block_inner_neon.do_2(bytes, p1, p2); @@ -24,8 +25,9 @@ xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_neon_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { kernel_neon_begin(); xor_block_inner_neon.do_3(bytes, p1, p2, p3); @@ -33,8 +35,10 @@ xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_neon_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { kernel_neon_begin(); xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4); @@ -42,8 +46,11 @@ xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_neon_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { kernel_neon_begin(); xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5); diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c index d189cf4e70ea..96b171995d19 100644 --- a/arch/arm64/lib/xor-neon.c +++ b/arch/arm64/lib/xor-neon.c @@ -10,8 +10,8 @@ #include #include -void xor_arm64_neon_2(unsigned long bytes, unsigned long *p1, - unsigned long *p2) +void 
xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { uint64_t *dp1 = (uint64_t *)p1; uint64_t *dp2 = (uint64_t *)p2; @@ -37,8 +37,9 @@ void xor_arm64_neon_2(unsigned long bytes, unsigned long *p1, } while (--lines > 0); } -void xor_arm64_neon_3(unsigned long bytes, unsigned long *p1, - unsigned long *p2, unsigned long *p3) +void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { uint64_t *dp1 = (uint64_t *)p1; uint64_t *dp2 = (uint64_t *)p2; @@ -72,8 +73,10 @@ void xor_arm64_neon_3(unsigned long bytes, unsigned long *p1, } while (--lines > 0); } -void xor_arm64_neon_4(unsigned long bytes, unsigned long *p1, - unsigned long *p2, unsigned long *p3, unsigned long *p4) +void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { uint64_t *dp1 = (uint64_t *)p1; uint64_t *dp2 = (uint64_t *)p2; @@ -115,9 +118,11 @@ void xor_arm64_neon_4(unsigned long bytes, unsigned long *p1, } while (--lines > 0); } -void xor_arm64_neon_5(unsigned long bytes, unsigned long *p1, - unsigned long *p2, unsigned long *p3, - unsigned long *p4, unsigned long *p5) +void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { uint64_t *dp1 = (uint64_t *)p1; uint64_t *dp2 = (uint64_t *)p2; @@ -186,8 +191,10 @@ static inline uint64x2_t eor3(uint64x2_t p, uint64x2_t q, uint64x2_t r) return res; } -static void xor_arm64_eor3_3(unsigned long bytes, unsigned long *p1, - unsigned long *p2, unsigned long *p3) +static void xor_arm64_eor3_3(unsigned long bytes, + unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { uint64_t *dp1 = (uint64_t *)p1; uint64_t *dp2 = (uint64_t *)p2; @@ -219,9 +226,11 @@ static void xor_arm64_eor3_3(unsigned long bytes, unsigned long *p1, } while (--lines > 0); } -static void xor_arm64_eor3_4(unsigned long bytes, unsigned long *p1, - unsigned long *p2, unsigned long *p3, - unsigned long *p4) +static void xor_arm64_eor3_4(unsigned long bytes, + unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { uint64_t *dp1 = (uint64_t *)p1; uint64_t *dp2 = (uint64_t *)p2; @@ -261,9 +270,12 @@ static void xor_arm64_eor3_4(unsigned long bytes, unsigned long *p1, } while (--lines > 0); } -static void xor_arm64_eor3_5(unsigned long bytes, unsigned long *p1, - unsigned long *p2, unsigned long *p3, - unsigned long *p4, unsigned long *p5) +static void xor_arm64_eor3_5(unsigned long bytes, + unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { uint64_t *dp1 = (uint64_t *)p1; uint64_t *dp2 = (uint64_t *)p2; diff --git a/arch/ia64/include/asm/xor.h b/arch/ia64/include/asm/xor.h index 673051bf9d7d..6785f70d3208 100644 --- a/arch/ia64/include/asm/xor.h +++ b/arch/ia64/include/asm/xor.h @@ -4,13 +4,20 @@ */ -extern void xor_ia64_2(unsigned long, unsigned long *, unsigned long *); -extern void xor_ia64_3(unsigned long, unsigned long *, unsigned long *, - unsigned long *); -extern void 
xor_ia64_4(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); -extern void xor_ia64_5(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); +extern void xor_ia64_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2); +extern void xor_ia64_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3); +extern void xor_ia64_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4); +extern void xor_ia64_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5); static struct xor_block_template xor_block_ia64 = { .name = "ia64", diff --git a/arch/powerpc/include/asm/xor_altivec.h b/arch/powerpc/include/asm/xor_altivec.h index 6ca923510b59..294620a25f80 100644 --- a/arch/powerpc/include/asm/xor_altivec.h +++ b/arch/powerpc/include/asm/xor_altivec.h @@ -3,17 +3,20 @@ #define _ASM_POWERPC_XOR_ALTIVEC_H #ifdef CONFIG_ALTIVEC - -void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in); -void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in); -void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - unsigned long *v4_in); -void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - unsigned long *v4_in, unsigned long *v5_in); +void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2); +void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3); +void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4); +void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5); #endif #endif /* _ASM_POWERPC_XOR_ALTIVEC_H */ diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c index 54e61979e80e..aab49d056d18 100644 --- a/arch/powerpc/lib/xor_vmx.c +++ b/arch/powerpc/lib/xor_vmx.c @@ -49,8 +49,9 @@ typedef vector signed char unative_t; V1##_3 = vec_xor(V1##_3, V2##_3); \ } while (0) -void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in) +void __xor_altivec_2(unsigned long bytes, + unsigned long * __restrict v1_in, + const unsigned long * __restrict v2_in) { DEFINE(v1); DEFINE(v2); @@ -67,8 +68,10 @@ void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in, } while (--lines > 0); } -void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in) +void __xor_altivec_3(unsigned long bytes, + unsigned long * __restrict v1_in, + const unsigned long * __restrict v2_in, + const unsigned long * __restrict v3_in) { DEFINE(v1); DEFINE(v2); @@ -89,9 +92,11 @@ void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in, } while (--lines > 0); } -void 
__xor_altivec_4(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - unsigned long *v4_in) +void __xor_altivec_4(unsigned long bytes, + unsigned long * __restrict v1_in, + const unsigned long * __restrict v2_in, + const unsigned long * __restrict v3_in, + const unsigned long * __restrict v4_in) { DEFINE(v1); DEFINE(v2); @@ -116,9 +121,12 @@ void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in, } while (--lines > 0); } -void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - unsigned long *v4_in, unsigned long *v5_in) +void __xor_altivec_5(unsigned long bytes, + unsigned long * __restrict v1_in, + const unsigned long * __restrict v2_in, + const unsigned long * __restrict v3_in, + const unsigned long * __restrict v4_in, + const unsigned long * __restrict v5_in) { DEFINE(v1); DEFINE(v2); diff --git a/arch/powerpc/lib/xor_vmx.h b/arch/powerpc/lib/xor_vmx.h index 5c2b0839b179..573c41d90dac 100644 --- a/arch/powerpc/lib/xor_vmx.h +++ b/arch/powerpc/lib/xor_vmx.h @@ -6,16 +6,17 @@ * outside of the enable/disable altivec block. */ -void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in); - -void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in); - -void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - unsigned long *v4_in); - -void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - unsigned long *v4_in, unsigned long *v5_in); +void __xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2); +void __xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3); +void __xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4); +void __xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5); diff --git a/arch/powerpc/lib/xor_vmx_glue.c b/arch/powerpc/lib/xor_vmx_glue.c index 80dba916c367..35d917ece4d1 100644 --- a/arch/powerpc/lib/xor_vmx_glue.c +++ b/arch/powerpc/lib/xor_vmx_glue.c @@ -12,47 +12,51 @@ #include #include "xor_vmx.h" -void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in) +void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { preempt_disable(); enable_kernel_altivec(); - __xor_altivec_2(bytes, v1_in, v2_in); + __xor_altivec_2(bytes, p1, p2); disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_2); -void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in) +void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { preempt_disable(); enable_kernel_altivec(); - __xor_altivec_3(bytes, v1_in, v2_in, v3_in); + __xor_altivec_3(bytes, p1, p2, p3); disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_3); -void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - 
unsigned long *v4_in) +void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { preempt_disable(); enable_kernel_altivec(); - __xor_altivec_4(bytes, v1_in, v2_in, v3_in, v4_in); + __xor_altivec_4(bytes, p1, p2, p3, p4); disable_kernel_altivec(); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_4); -void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, - unsigned long *v2_in, unsigned long *v3_in, - unsigned long *v4_in, unsigned long *v5_in) +void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { preempt_disable(); enable_kernel_altivec(); - __xor_altivec_5(bytes, v1_in, v2_in, v3_in, v4_in, v5_in); + __xor_altivec_5(bytes, p1, p2, p3, p4, p5); disable_kernel_altivec(); preempt_enable(); } diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c index a963c3d8ad0d..fb924a8041dc 100644 --- a/arch/s390/lib/xor.c +++ b/arch/s390/lib/xor.c @@ -11,7 +11,8 @@ #include #include -static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { asm volatile( " larl 1,2f\n" @@ -32,8 +33,9 @@ static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) : "0", "1", "cc", "memory"); } -static void xor_xc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { asm volatile( " larl 1,2f\n" @@ -58,8 +60,10 @@ static void xor_xc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, : : "0", "1", "cc", "memory"); } -static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { asm volatile( " larl 1,2f\n" @@ -88,8 +92,11 @@ static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, : : "0", "1", "cc", "memory"); } -static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { asm volatile( " larl 1,2f\n" diff --git a/arch/sparc/include/asm/xor_32.h b/arch/sparc/include/asm/xor_32.h index 3e5af37e4b9c..0351813cf3af 100644 --- a/arch/sparc/include/asm/xor_32.h +++ b/arch/sparc/include/asm/xor_32.h @@ -13,7 +13,8 @@ */ static void -sparc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +sparc_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { int lines = bytes / (sizeof (long)) / 8; @@ -50,8 +51,9 @@ sparc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -sparc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +sparc_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * 
__restrict p2, + const unsigned long * __restrict p3) { int lines = bytes / (sizeof (long)) / 8; @@ -101,8 +103,10 @@ sparc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -sparc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +sparc_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { int lines = bytes / (sizeof (long)) / 8; @@ -165,8 +169,11 @@ sparc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -sparc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +sparc_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { int lines = bytes / (sizeof (long)) / 8; diff --git a/arch/sparc/include/asm/xor_64.h b/arch/sparc/include/asm/xor_64.h index 16169f3edcd5..caaddea8ad79 100644 --- a/arch/sparc/include/asm/xor_64.h +++ b/arch/sparc/include/asm/xor_64.h @@ -12,13 +12,20 @@ #include -void xor_vis_2(unsigned long, unsigned long *, unsigned long *); -void xor_vis_3(unsigned long, unsigned long *, unsigned long *, - unsigned long *); -void xor_vis_4(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); -void xor_vis_5(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); +void xor_vis_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2); +void xor_vis_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3); +void xor_vis_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4); +void xor_vis_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5); /* XXX Ugh, write cheetah versions... 
-DaveM */ @@ -30,13 +37,20 @@ static struct xor_block_template xor_block_VIS = { .do_5 = xor_vis_5, }; -void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); -void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, - unsigned long *); -void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); -void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); +void xor_niagara_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2); +void xor_niagara_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3); +void xor_niagara_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4); +void xor_niagara_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5); static struct xor_block_template xor_block_niagara = { .name = "Niagara", diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h index 2ee95a7769e6..7b0307acc410 100644 --- a/arch/x86/include/asm/xor.h +++ b/arch/x86/include/asm/xor.h @@ -57,7 +57,8 @@ op(i + 3, 3) static void -xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_sse_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { unsigned long lines = bytes >> 8; @@ -108,7 +109,8 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_sse_2_pf64(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { unsigned long lines = bytes >> 8; @@ -142,8 +144,9 @@ xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_sse_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { unsigned long lines = bytes >> 8; @@ -201,8 +204,9 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_sse_3_pf64(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { unsigned long lines = bytes >> 8; @@ -238,8 +242,10 @@ xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_sse_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { unsigned long lines = bytes >> 8; @@ -304,8 +310,10 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_sse_4_pf64(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + 
const unsigned long * __restrict p4) { unsigned long lines = bytes >> 8; @@ -343,8 +351,11 @@ xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_sse_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { unsigned long lines = bytes >> 8; @@ -416,8 +427,11 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_sse_5_pf64(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { unsigned long lines = bytes >> 8; diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h index 67ceb790e639..7a6b9474591e 100644 --- a/arch/x86/include/asm/xor_32.h +++ b/arch/x86/include/asm/xor_32.h @@ -21,7 +21,8 @@ #include static void -xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_pII_mmx_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { unsigned long lines = bytes >> 7; @@ -64,8 +65,9 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_pII_mmx_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { unsigned long lines = bytes >> 7; @@ -113,8 +115,10 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_pII_mmx_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { unsigned long lines = bytes >> 7; @@ -168,8 +172,11 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, static void -xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_pII_mmx_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { unsigned long lines = bytes >> 7; @@ -248,7 +255,8 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, #undef BLOCK static void -xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_p5_mmx_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { unsigned long lines = bytes >> 6; @@ -295,8 +303,9 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_p5_mmx_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { unsigned long lines = bytes >> 6; @@ -352,8 
+361,10 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_p5_mmx_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { unsigned long lines = bytes >> 6; @@ -418,8 +429,11 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_p5_mmx_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { unsigned long lines = bytes >> 6; diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h index 0c4e5b5e3852..7f81dd5897f4 100644 --- a/arch/x86/include/asm/xor_avx.h +++ b/arch/x86/include/asm/xor_avx.h @@ -26,7 +26,8 @@ BLOCK4(8) \ BLOCK4(12) -static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1) +static void xor_avx_2(unsigned long bytes, unsigned long * __restrict p0, + const unsigned long * __restrict p1) { unsigned long lines = bytes >> 9; @@ -52,8 +53,9 @@ do { \ kernel_fpu_end(); } -static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1, - unsigned long *p2) +static void xor_avx_3(unsigned long bytes, unsigned long * __restrict p0, + const unsigned long * __restrict p1, + const unsigned long * __restrict p2) { unsigned long lines = bytes >> 9; @@ -82,8 +84,10 @@ do { \ kernel_fpu_end(); } -static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1, - unsigned long *p2, unsigned long *p3) +static void xor_avx_4(unsigned long bytes, unsigned long * __restrict p0, + const unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { unsigned long lines = bytes >> 9; @@ -115,8 +119,11 @@ do { \ kernel_fpu_end(); } -static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1, - unsigned long *p2, unsigned long *p3, unsigned long *p4) +static void xor_avx_5(unsigned long bytes, unsigned long * __restrict p0, + const unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { unsigned long lines = bytes >> 9; diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h index b62a2a56a4d4..44509d48fca2 100644 --- a/include/asm-generic/xor.h +++ b/include/asm-generic/xor.h @@ -8,7 +8,8 @@ #include static void -xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { long lines = bytes / (sizeof (long)) / 8; @@ -27,8 +28,9 @@ xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { long lines = bytes / (sizeof (long)) / 8; @@ -48,8 +50,10 @@ xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_4(unsigned long bytes, unsigned long *p1, 
unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { long lines = bytes / (sizeof (long)) / 8; @@ -70,8 +74,11 @@ xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { long lines = bytes / (sizeof (long)) / 8; @@ -93,7 +100,8 @@ xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { long lines = bytes / (sizeof (long)) / 8; @@ -129,8 +137,9 @@ xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { long lines = bytes / (sizeof (long)) / 8; @@ -175,8 +184,10 @@ xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { long lines = bytes / (sizeof (long)) / 8; @@ -230,8 +241,11 @@ xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_32regs_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { long lines = bytes / (sizeof (long)) / 8; @@ -294,7 +308,8 @@ xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); @@ -320,8 +335,9 @@ xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); @@ -350,8 +366,10 @@ xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned 
long * __restrict p3, + const unsigned long * __restrict p4) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -384,8 +402,11 @@ xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -421,7 +442,8 @@ xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -466,8 +488,9 @@ xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -523,8 +546,10 @@ xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -591,8 +616,11 @@ xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { long lines = bytes / (sizeof (long)) / 8 - 1; diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h index 2a9fee8ddae3..51b811b62322 100644 --- a/include/linux/raid/xor.h +++ b/include/linux/raid/xor.h @@ -11,13 +11,20 @@ struct xor_block_template { struct xor_block_template *next; const char *name; int speed; - void (*do_2)(unsigned long, unsigned long *, unsigned long *); - void (*do_3)(unsigned long, unsigned long *, unsigned long *, - unsigned long *); - void (*do_4)(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); - void (*do_5)(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); + void (*do_2)(unsigned long, unsigned long * __restrict, + const unsigned long * __restrict); + void (*do_3)(unsigned long, unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict); + void (*do_4)(unsigned long, unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict); + void (*do_5)(unsigned long, unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long 
* __restrict); }; #endif -- cgit v1.2.3-71-gd317 From dc1b4df09acdca7a89806b28f235cd6d8dcd3d24 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Feb 2022 10:19:43 +0000 Subject: atomics: Fix atomic64_{read_acquire,set_release} fallbacks Arnd reports that on 32-bit architectures, the fallbacks for atomic64_read_acquire() and atomic64_set_release() are broken as they use smp_load_acquire() and smp_store_release() respectively, which do not work on types larger than the native word size. Since those contain compiletime_assert_atomic_type(), any attempt to use those fallbacks will result in a build-time error. e.g. with the following added to arch/arm/kernel/setup.c: | void test_atomic64(atomic64_t *v) | { | atomic64_set_release(v, 5); | atomic64_read_acquire(v); | } The compiler will complain as follows: | In file included from <command-line>: | In function 'arch_atomic64_set_release', | inlined from 'test_atomic64' at ./include/linux/atomic/atomic-instrumented.h:669:2: | ././include/linux/compiler_types.h:346:38: error: call to '__compiletime_assert_9' declared with attribute error: Need native word sized stores/loads for atomicity. | 346 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) | | ^ | ././include/linux/compiler_types.h:327:4: note: in definition of macro '__compiletime_assert' | 327 | prefix ## suffix(); \ | | ^~~~~~ | ././include/linux/compiler_types.h:346:2: note: in expansion of macro '_compiletime_assert' | 346 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) | | ^~~~~~~~~~~~~~~~~~~ | ././include/linux/compiler_types.h:349:2: note: in expansion of macro 'compiletime_assert' | 349 | compiletime_assert(__native_word(t), \ | | ^~~~~~~~~~~~~~~~~~ | ./include/asm-generic/barrier.h:133:2: note: in expansion of macro 'compiletime_assert_atomic_type' | 133 | compiletime_assert_atomic_type(*p); \ | | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ./include/asm-generic/barrier.h:164:55: note: in expansion of macro '__smp_store_release' | 164 | #define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0) | | ^~~~~~~~~~~~~~~~~~~ | ./include/linux/atomic/atomic-arch-fallback.h:1270:2: note: in expansion of macro 'smp_store_release' | 1270 | smp_store_release(&(v)->counter, i); | | ^~~~~~~~~~~~~~~~~ | make[2]: *** [scripts/Makefile.build:288: arch/arm/kernel/setup.o] Error 1 | make[1]: *** [scripts/Makefile.build:550: arch/arm/kernel] Error 2 | make: *** [Makefile:1831: arch/arm] Error 2 Fix this by only using smp_load_acquire() and smp_store_release() for native atomic types, and otherwise falling back to the regular barriers necessary for acquire/release semantics, as we do in the more generic acquire and release fallbacks. Since the fallback templates are used to generate the atomic64_*() and atomic_*() operations, the __native_word() check is added to both. For the atomic_*() operations, which are always 32-bit, the __native_word() check is redundant but not harmful, as it is always true. For the example above this works as expected on 32-bit, e.g. for arm multi_v7_defconfig: | <test_atomic64>: | push {r4, r5} | dmb ish | pldw [r0] | mov r2, #5 | mov r3, #0 | ldrexd r4, [r0] | strexd r4, r2, [r0] | teq r4, #0 | bne 484 | ldrexd r2, [r0] | dmb ish | pop {r4, r5} | bx lr ... and also on 64-bit, e.g. for arm64 defconfig: | <test_atomic64>: | bti c | paciasp | mov x1, #0x5 | stlr x1, [x0] | ldar x0, [x0] | autiasp | ret Reported-by: Arnd Bergmann Signed-off-by: Mark Rutland Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ard Biesheuvel Reviewed-by: Boqun Feng Link: https://lore.kernel.org/r/20220207101943.439825-1-mark.rutland@arm.com --- include/linux/atomic/atomic-arch-fallback.h | 38 +++++++++++++++++++++++++---- scripts/atomic/fallbacks/read_acquire | 11 ++++++++- scripts/atomic/fallbacks/set_release | 7 +++++- 3 files changed, 49 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h index a3dba31df01e..6db58d180866 100644 --- a/include/linux/atomic/atomic-arch-fallback.h +++ b/include/linux/atomic/atomic-arch-fallback.h @@ -151,7 +151,16 @@ static __always_inline int arch_atomic_read_acquire(const atomic_t *v) { - return smp_load_acquire(&(v)->counter); + int ret; + + if (__native_word(atomic_t)) { + ret = smp_load_acquire(&(v)->counter); + } else { + ret = arch_atomic_read(v); + __atomic_acquire_fence(); + } + + return ret; } #define arch_atomic_read_acquire arch_atomic_read_acquire #endif @@ -160,7 +169,12 @@ arch_atomic_read_acquire(const atomic_t *v) static __always_inline void arch_atomic_set_release(atomic_t *v, int i) { - smp_store_release(&(v)->counter, i); + if (__native_word(atomic_t)) { + smp_store_release(&(v)->counter, i); + } else { + __atomic_release_fence(); + arch_atomic_set(v, i); + } } #define arch_atomic_set_release arch_atomic_set_release #endif @@ -1258,7 +1272,16 @@ arch_atomic_dec_if_positive(atomic_t *v) static __always_inline s64 arch_atomic64_read_acquire(const atomic64_t *v) { - return smp_load_acquire(&(v)->counter); + s64 ret; + + if (__native_word(atomic64_t)) { + ret = smp_load_acquire(&(v)->counter); + } else { + ret = arch_atomic64_read(v); + __atomic_acquire_fence(); + } + + return ret; } #define arch_atomic64_read_acquire arch_atomic64_read_acquire #endif @@ -1267,7 +1290,12 @@ arch_atomic64_read_acquire(const atomic64_t *v) static __always_inline void arch_atomic64_set_release(atomic64_t *v, s64 i) { - smp_store_release(&(v)->counter, i); + if (__native_word(atomic64_t)) { + smp_store_release(&(v)->counter, i); + } else { + __atomic_release_fence(); + arch_atomic64_set(v, i); + } } #define arch_atomic64_set_release arch_atomic64_set_release #endif @@ -2358,4 +2386,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v) #endif #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// cca554917d7ea73d5e3e7397dd70c484cad9b2c4 +// 8e2cc06bc0d2c0967d2f8424762bd48555ee40ae diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire index 803ba7561076..a0ea1d26e6b2 100755 --- a/scripts/atomic/fallbacks/read_acquire +++ b/scripts/atomic/fallbacks/read_acquire @@ -2,6 +2,15 @@ cat <<EOF static __always_inline ${ret} arch_${atomic}_read_acquire(const ${atomic}_t *v) { - return smp_load_acquire(&(v)->counter); + ${int} ret; + + if (__native_word(${atomic}_t)) { + ret = smp_load_acquire(&(v)->counter); + } else { + ret = arch_${atomic}_read(v); + __atomic_acquire_fence(); + } + + return ret; } EOF diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release index 86ede759f24e..05cdb7f42477 100755 --- a/scripts/atomic/fallbacks/set_release +++ b/scripts/atomic/fallbacks/set_release @@ -2,6 +2,11 @@ cat <<EOF static __always_inline void arch_${atomic}_set_release(${atomic}_t *v, ${int} i) { - smp_store_release(&(v)->counter, i); + if (__native_word(${atomic}_t)) { + smp_store_release(&(v)->counter, i); + } else { + __atomic_release_fence(); + arch_${atomic}_set(v, i); + } } EOF
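The fix follows the standard decomposition of acquire/release accesses into a relaxed atomic access plus a fence. For illustration only, a minimal userspace C11 analogue of that pattern (an assumption-laden sketch, not kernel code; the generated fallbacks above are the authoritative implementation):

	#include <stdatomic.h>
	#include <stdint.h>

	/* Release store for a type possibly wider than the native word:
	 * a release fence followed by a relaxed, but still atomic, store. */
	static void set_release_u64(_Atomic uint64_t *v, uint64_t i)
	{
		atomic_thread_fence(memory_order_release);
		atomic_store_explicit(v, i, memory_order_relaxed);
	}

	/* Acquire load: a relaxed atomic load followed by an acquire fence. */
	static uint64_t read_acquire_u64(_Atomic uint64_t *v)
	{
		uint64_t ret = atomic_load_explicit(v, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		return ret;
	}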
-- cgit v1.2.3-71-gd317 From 9983a9d577db415c41099a20a5637ab25dd3c240 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 8 Feb 2022 18:08:02 +0100 Subject: locking/local_lock: Make the empty local_lock_*() function a macro. It has been said that local_lock() does not add any overhead compared to preempt_disable() in a !LOCKDEP configuration. A micro-benchmark showed an unexpected result which comes down to the fact that local_lock() was not entirely optimized away. In the !LOCKDEP configuration local_lock_acquire() is an empty static inline function. On x86 the this_cpu_ptr() argument of that function is fully evaluated, leading to additional mov+add instructions which are neither needed nor used. Replace the empty static inline functions with macros. The typecheck() macro ensures that the argument is of the proper type while the resulting disassembly shows no traces of this_cpu_ptr(). Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Waiman Long Link: https://lkml.kernel.org/r/YgKjciR60fZft2l4@linutronix.de --- include/linux/local_lock_internal.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index 975e33b793a7..6d635e8306d6 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -44,9 +44,9 @@ static inline void local_lock_debug_init(local_lock_t *l) } #else /* CONFIG_DEBUG_LOCK_ALLOC */ # define LOCAL_LOCK_DEBUG_INIT(lockname) -static inline void local_lock_acquire(local_lock_t *l) { } -static inline void local_lock_release(local_lock_t *l) { } -static inline void local_lock_debug_init(local_lock_t *l) { } +# define local_lock_acquire(__ll) do { typecheck(local_lock_t *, __ll); } while (0) +# define local_lock_release(__ll) do { typecheck(local_lock_t *, __ll); } while (0) +# define local_lock_debug_init(__ll) do { typecheck(local_lock_t *, __ll); } while (0) #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ #define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) } -- cgit v1.2.3-71-gd317 From 48b6190a00425a1bebac9f7ae4b338a1e20f50f3 Mon Sep 17 00:00:00 2001 From: "D. Wythe" Date: Thu, 10 Feb 2022 17:11:36 +0800 Subject: net/smc: Limit SMC visits when handshake workqueue congested This patch provides a mechanism to constrain the rate of incoming SMC connections according to the pressure on the SMC handshake process. At present, bursts of new connections cause incoming connections to be backlogged in the SMC handshake queue, raising the connection establishment time, which is quite unacceptable for applications that rely on short-lived connections. There are two ways to implement this mechanism: 1. Apply the limit after the TCP connection is established. 2. Apply the limit before the TCP connection is established. In the first way, we need to wait for and receive the CLC messages that the client may send, and then actively reply with a decline message; this is itself a sort of SMC handshake and lengthens the connection establishment time along the way. In the second way, the only concern is that we need to inject SMC logic into TCP when it is about to reply to the incoming SYN; since we already do that, this is no longer a problem. The advantage is obvious: little additional processing is required to enforce the constraint. This patch uses the second way. After this patch, connections beyond the constraint will not receive any SMC indication, and SMC will not be involved in any of their subsequent processing.
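In concrete terms, the gating added below reduces to the following decision on the SYN path (a simplified restatement of the tcp_openreq_init() hunk in this patch, not additional kernel code):

	/* Advertise SMC in the SYN-ACK only if the client requested it and
	 * the listener either registered no congestion predicate or the
	 * predicate reports that the handshake queue can keep up. */
	ireq->smc_ok = rx_opt->smc_ok &&
		       !(tcp_sk(sk)->smc_hs_congested &&
			 tcp_sk(sk)->smc_hs_congested(sk));

A connection gated here simply proceeds as plain TCP: the client sees no SMC indication, so no decline message ever needs to be sent.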
Link: https://lore.kernel.org/all/1641301961-59331-1-git-send-email-alibuda@linux.alibaba.com/ Signed-off-by: D. Wythe Signed-off-by: David S. Miller --- include/linux/tcp.h | 1 + net/ipv4/tcp_input.c | 3 ++- net/smc/af_smc.c | 17 +++++++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 78b91bb92f0d..1168302b7927 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -394,6 +394,7 @@ struct tcp_sock { bool is_mptcp; #endif #if IS_ENABLED(CONFIG_SMC) + bool (*smc_hs_congested)(const struct sock *sk); bool syn_smc; /* SYN includes SMC */ #endif diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index af94a6d22a9d..92e65d56dc2c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6703,7 +6703,8 @@ static void tcp_openreq_init(struct request_sock *req, ireq->ir_num = ntohs(tcp_hdr(skb)->dest); ireq->ir_mark = inet_request_mark(sk, skb); #if IS_ENABLED(CONFIG_SMC) - ireq->smc_ok = rx_opt->smc_ok; + ireq->smc_ok = rx_opt->smc_ok && !(tcp_sk(sk)->smc_hs_congested && + tcp_sk(sk)->smc_hs_congested(sk)); #endif } diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 858724256078..a05ffb268c3e 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -103,6 +103,21 @@ drop: return NULL; } +static bool smc_hs_congested(const struct sock *sk) +{ + const struct smc_sock *smc; + + smc = smc_clcsock_user_data(sk); + + if (!smc) + return true; + + if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) + return true; + + return false; +} + static struct smc_hashinfo smc_v4_hashinfo = { .lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock), }; @@ -2311,6 +2326,8 @@ static int smc_listen(struct socket *sock, int backlog) inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops; + tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; + rc = kernel_listen(smc->clcsock, backlog); if (rc) { smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready; -- cgit v1.2.3-71-gd317 From a6a6fe27bab48f0d09a64b051e7bde432fcae081 Mon Sep 17 00:00:00 2001 From: "D. Wythe" Date: Thu, 10 Feb 2022 17:11:37 +0800 Subject: net/smc: Dynamic control handshake limitation by socket options This patch adds dynamic, per-socket control of the SMC handshake limitation; in production environments the same application may handle different service types and hold different opinions on the SMC handshake limitation. This patch uses socket options to accomplish this; since we don't have a socket option level for SMC yet, implementing one is required at the same time. This patch does the following: - add new socket option level: SOL_SMC. - add new SMC socket option: SMC_LIMIT_HS. - provide getter/setter for SMC socket options. Link: https://lore.kernel.org/all/20f504f961e1a803f85d64229ad84260434203bd.1644323503.git.alibuda@linux.alibaba.com/ Signed-off-by: D. Wythe Signed-off-by: David S. Miller
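From userspace the new option is driven with plain setsockopt()/getsockopt() calls. A minimal hypothetical sketch (assuming an AF_SMC stream socket and the uapi values introduced below; error handling omitted for brevity):

	#include <sys/socket.h>

	#ifndef SOL_SMC
	#define SOL_SMC 286		/* socket option level added below */
	#endif
	#ifndef SMC_LIMIT_HS
	#define SMC_LIMIT_HS 1		/* constraint on smc handshake */
	#endif

	static int enable_smc_hs_limit(int fd)
	{
		int one = 1;

		/* Set this before listen(): smc_listen() installs the
		 * smc_hs_congested callback only if limit_smc_hs is set. */
		return setsockopt(fd, SOL_SMC, SMC_LIMIT_HS, &one, sizeof(one));
	}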
--- include/linux/socket.h | 1 + include/uapi/linux/smc.h | 4 +++ net/smc/af_smc.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++- net/smc/smc.h | 1 + 4 files changed, 74 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/socket.h b/include/linux/socket.h index 8ef26d89ef49..6f85f5d957ef 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -366,6 +366,7 @@ struct ucred { #define SOL_XDP 283 #define SOL_MPTCP 284 #define SOL_MCTP 285 +#define SOL_SMC 286 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h index 6c2874fd2c00..343e7450c3a3 100644 --- a/include/uapi/linux/smc.h +++ b/include/uapi/linux/smc.h @@ -284,4 +284,8 @@ enum { __SMC_NLA_SEID_TABLE_MAX, SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1 }; + +/* SMC socket options */ +#define SMC_LIMIT_HS 1 /* constraint on smc handshake */ + #endif /* _UAPI_LINUX_SMC_H */ diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index a05ffb268c3e..97dcdc0a2107 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -2326,7 +2326,8 @@ static int smc_listen(struct socket *sock, int backlog) inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops; - tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; + if (smc->limit_smc_hs) + tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; rc = kernel_listen(smc->clcsock, backlog); if (rc) { @@ -2621,6 +2622,67 @@ out: return rc ? rc : rc1; } +static int __smc_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct smc_sock *smc; + int val, len; + + smc = smc_sk(sock->sk); + + if (get_user(len, optlen)) + return -EFAULT; + + len = min_t(int, len, sizeof(int)); + + if (len < 0) + return -EINVAL; + + switch (optname) { + case SMC_LIMIT_HS: + val = smc->limit_smc_hs; + break; + default: + return -EOPNOTSUPP; + } + + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + +static int __smc_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + int val, rc; + + smc = smc_sk(sk); + + lock_sock(sk); + switch (optname) { + case SMC_LIMIT_HS: + if (optlen < sizeof(int)) + return -EINVAL; + if (copy_from_sockptr(&val, optval, sizeof(int))) + return -EFAULT; + + smc->limit_smc_hs = !!val; + rc = 0; + break; + default: + rc = -EOPNOTSUPP; + break; + } + release_sock(sk); + + return rc; +} + static int smc_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { @@ -2630,6 +2692,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, if (level == SOL_TCP && optname == TCP_ULP) return -EOPNOTSUPP; + else if (level == SOL_SMC) + return __smc_setsockopt(sock, level, optname, optval, optlen); smc = smc_sk(sk); @@ -2712,6 +2776,9 @@ static int smc_getsockopt(struct socket *sock, int level, int optname, struct smc_sock *smc; int rc; + if (level == SOL_SMC) + return __smc_getsockopt(sock, level, optname, optval, optlen); + smc = smc_sk(sock->sk); mutex_lock(&smc->clcsock_release_lock); if (!smc->clcsock) { diff --git a/net/smc/smc.h b/net/smc/smc.h index e91e40040d07..7e2693832a1b 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -249,6 +249,7 @@ struct smc_sock { /* smc sock container */ struct work_struct smc_listen_work;/* prepare new accept socket */ struct list_head accept_q; /* sockets to be accepted */
spinlock_t accept_q_lock; /* protects accept_q */ + bool limit_smc_hs; /* put constraint on handshake */ bool use_fallback; /* fallback to tcp */ int fallback_rsn; /* reason for fallback */ u32 peer_diagnosis; /* decline reason from peer */ -- cgit v1.2.3-71-gd317 From 0e51e2ab49a99bc5077760aa083dfa1c3bf9899b Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 11 Feb 2022 18:11:47 +0800 Subject: block: remove THROTL_IOPS_MAX No one uses THROTL_IOPS_MAX any more, so remove it. Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220211101149.2368042-2-ming.lei@redhat.com Signed-off-by: Jens Axboe --- include/linux/blk-cgroup.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index b4de2010fba5..bdc49bd4eef0 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -28,8 +28,6 @@ /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ #define BLKG_STAT_CPU_BATCH (INT_MAX / 2) -/* Max limits for throttle policy */ -#define THROTL_IOPS_MAX UINT_MAX #define FC_APPID_LEN 129 -- cgit v1.2.3-71-gd317 From 672fdcf0e7de3b1e39416ac85abf178f023271f1 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 11 Feb 2022 18:11:49 +0800 Subject: block: partition include/linux/blk-cgroup.h Partition include/linux/blk-cgroup.h into two parts: one is the public part, the other is the block layer private part. Suggested by Christoph Hellwig. Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220211101149.2368042-4-ming.lei@redhat.com Signed-off-by: Jens Axboe --- block/bfq-iosched.h | 1 - block/bio.c | 2 +- block/blk-cgroup-rwstat.h | 2 +- block/blk-cgroup.c | 2 +- block/blk-cgroup.h | 477 ++++++++++++++++++++++++++++++++++++++++++++ block/blk-core.c | 2 +- block/blk-crypto-fallback.c | 2 +- block/blk-iocost.c | 2 +- block/blk-iolatency.c | 2 +- block/blk-ioprio.c | 2 +- block/blk-sysfs.c | 2 +- block/blk-throttle.c | 1 - block/bounce.c | 2 +- block/elevator.c | 2 +- include/linux/blk-cgroup.h | 459 +----------------------------------------- 15 files changed, 493 insertions(+), 467 deletions(-) create mode 100644 block/blk-cgroup.h (limited to 'include/linux') diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 07288b9da389..72255ec44f8f 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -8,7 +8,6 @@ #include #include -#include <linux/blk-cgroup.h> #include "blk-cgroup-rwstat.h" diff --git a/block/bio.c b/block/bio.c index 18d34b33351b..b15f5466ce08 100644 --- a/block/bio.c +++ b/block/bio.c @@ -15,7 +15,6 @@ #include #include #include -#include <linux/blk-cgroup.h> #include #include #include @@ -24,6 +23,7 @@ #include #include "blk.h" #include "blk-rq-qos.h" +#include "blk-cgroup.h" struct bio_alloc_cache { struct bio *free_list; diff --git a/block/blk-cgroup-rwstat.h b/block/blk-cgroup-rwstat.h index ee746919c41f..9f2723b34b75 100644 --- a/block/blk-cgroup-rwstat.h +++ b/block/blk-cgroup-rwstat.h @@ -6,7 +6,7 @@ #ifndef _BLK_CGROUP_RWSTAT_H #define _BLK_CGROUP_RWSTAT_H -#include <linux/blk-cgroup.h> +#include "blk-cgroup.h" enum blkg_rwstat_type { BLKG_RWSTAT_READ, diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 35deaceba1f0..4108d445c73a 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -26,11 +26,11 @@ #include #include #include -#include <linux/blk-cgroup.h> #include #include #include #include "blk.h" +#include "blk-cgroup.h" #include "blk-ioprio.h" #include "blk-throttle.h" diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h new file mode 100644 index
000000000000..3e91803c4a55 --- /dev/null +++ b/block/blk-cgroup.h @@ -0,0 +1,477 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BLK_CGROUP_PRIVATE_H +#define _BLK_CGROUP_PRIVATE_H +/* + * block cgroup private header + * + * Based on ideas and code from CFQ, CFS and BFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Copyright (C) 2009 Vivek Goyal + * Nauman Rafique + */ + +#include + +/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ +#define BLKG_STAT_CPU_BATCH (INT_MAX / 2) + +#ifdef CONFIG_BLK_CGROUP + +/* + * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a + * request_queue (q). This is used by blkcg policies which need to track + * information per blkcg - q pair. + * + * There can be multiple active blkcg policies and each blkg:policy pair is + * represented by a blkg_policy_data which is allocated and freed by each + * policy's pd_alloc/free_fn() methods. A policy can allocate private data + * area by allocating larger data structure which embeds blkg_policy_data + * at the beginning. + */ +struct blkg_policy_data { + /* the blkg and policy id this per-policy data belongs to */ + struct blkcg_gq *blkg; + int plid; +}; + +/* + * Policies that need to keep per-blkcg data which is independent from any + * request_queue associated to it should implement cpd_alloc/free_fn() + * methods. A policy can allocate private data area by allocating larger + * data structure which embeds blkcg_policy_data at the beginning. + * cpd_init() is invoked to let each policy handle per-blkcg data. + */ +struct blkcg_policy_data { + /* the blkcg and policy id this per-policy data belongs to */ + struct blkcg *blkcg; + int plid; +}; + +typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); +typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd); +typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd); +typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd); +typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, + struct request_queue *q, struct blkcg *blkcg); +typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); +typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, + struct seq_file *s); + +struct blkcg_policy { + int plid; + /* cgroup files for the policy */ + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + + /* operations */ + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_init_cpd_fn *cpd_init_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_bind_cpd_fn *cpd_bind_fn; + + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +extern struct blkcg blkcg_root; +extern bool blkcg_debug_stats; + +struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, + struct request_queue *q, bool update_hint); +int blkcg_init_queue(struct request_queue *q); +void blkcg_exit_queue(struct request_queue *q); + +/* Blkio controller policy registration */ +int blkcg_policy_register(struct blkcg_policy 
*pol); +void blkcg_policy_unregister(struct blkcg_policy *pol); +int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol); +void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol); + +const char *blkg_dev_name(struct blkcg_gq *blkg); +void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, + u64 (*prfill)(struct seq_file *, + struct blkg_policy_data *, int), + const struct blkcg_policy *pol, int data, + bool show_total); +u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); + +struct blkg_conf_ctx { + struct block_device *bdev; + struct blkcg_gq *blkg; + char *body; +}; + +struct block_device *blkcg_conf_open_bdev(char **inputp); +int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + char *input, struct blkg_conf_ctx *ctx); +void blkg_conf_finish(struct blkg_conf_ctx *ctx); + +/** + * blkcg_css - find the current css + * + * Find the css associated with either the kthread or the current task. + * This may return a dying css, so it is up to the caller to use tryget logic + * to confirm it is alive and well. + */ +static inline struct cgroup_subsys_state *blkcg_css(void) +{ + struct cgroup_subsys_state *css; + + css = kthread_blkcg(); + if (css) + return css; + return task_css(current, io_cgrp_id); +} + +/** + * __bio_blkcg - internal, inconsistent version to get blkcg + * + * DO NOT USE. + * This function is inconsistent and consequently is dangerous to use. The + * first part of the function returns a blkcg where a reference is owned by the + * bio. This means it does not need to be rcu protected as it cannot go away + * with the bio owning a reference to it. However, the latter potentially gets + * it from task_css(). This can race against task migration and the cgroup + * dying. It is also semantically different as it must be called rcu protected + * and is susceptible to failure when trying to get a reference to it. + * Therefore, it is not ok to assume that *_get() will always succeed on the + * blkcg returned here. + */ +static inline struct blkcg *__bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; + return css_to_blkcg(blkcg_css()); +} + +/** + * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg + * @return: true if this bio needs to be submitted with the root blkg context. + * + * In order to avoid priority inversions we sometimes need to issue a bio as if + * it were attached to the root blkg, and then backcharge to the actual owning + * blkg. The idea is we do bio_blkcg() to look up the actual context for the + * bio and attach the appropriate blkg to the bio. Then we call this helper and + * if it is true run with the root blkg for that queue and then do any + * backcharging to the originating cgroup once the io is complete. + */ +static inline bool bio_issue_as_root_blkg(struct bio *bio) +{ + return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0; +} + +/** + * __blkg_lookup - internal version of blkg_lookup() + * @blkcg: blkcg of interest + * @q: request_queue of interest + * @update_hint: whether to update lookup hint with the result or not + * + * This is internal version and shouldn't be used by policy + * implementations. Looks up blkgs for the @blkcg - @q pair regardless of + * @q's bypass state. If @update_hint is %true, the caller should be + * holding @q->queue_lock and lookup hint is updated on success. 
+ */ +static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, + struct request_queue *q, + bool update_hint) +{ + struct blkcg_gq *blkg; + + if (blkcg == &blkcg_root) + return q->root_blkg; + + blkg = rcu_dereference(blkcg->blkg_hint); + if (blkg && blkg->q == q) + return blkg; + + return blkg_lookup_slowpath(blkcg, q, update_hint); +} + +/** + * blkg_lookup - lookup blkg for the specified blkcg - q pair + * @blkcg: blkcg of interest + * @q: request_queue of interest + * + * Lookup blkg for the @blkcg - @q pair. This function should be called + * under RCU read lock. + */ +static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, + struct request_queue *q) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return __blkg_lookup(blkcg, q, false); +} + +/** + * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair + * @q: request_queue of interest + * + * Lookup blkg for @q at the root level. See also blkg_lookup(). + */ +static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) +{ + return q->root_blkg; +} + +/** + * blkg_to_pdata - get policy private data + * @blkg: blkg of interest + * @pol: policy of interest + * + * Return pointer to private data associated with the @blkg-@pol pair. + */ +static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, + struct blkcg_policy *pol) +{ + return blkg ? blkg->pd[pol->plid] : NULL; +} + +static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, + struct blkcg_policy *pol) +{ + return blkcg ? blkcg->cpd[pol->plid] : NULL; +} + +/** + * pdata_to_blkg - get blkg associated with policy private data + * @pd: policy private data of interest + * + * @pd is policy private data. Determine the blkg it's associated with. + */ +static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) +{ + return pd ? pd->blkg : NULL; +} + +static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd) +{ + return cpd ? cpd->blkcg : NULL; +} + +/** + * blkg_path - format cgroup path of blkg + * @blkg: blkg of interest + * @buf: target buffer + * @buflen: target buffer length + * + * Format the path of the cgroup of @blkg into @buf. + */ +static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) +{ + return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen); +} + +/** + * blkg_get - get a blkg reference + * @blkg: blkg to get + * + * The caller should be holding an existing reference. + */ +static inline void blkg_get(struct blkcg_gq *blkg) +{ + percpu_ref_get(&blkg->refcnt); +} + +/** + * blkg_tryget - try and get a blkg reference + * @blkg: blkg to get + * + * This is for use when doing an RCU lookup of the blkg. We may be in the midst + * of freeing this blkg, so we can only use it if the refcnt is not zero. + */ +static inline bool blkg_tryget(struct blkcg_gq *blkg) +{ + return blkg && percpu_ref_tryget(&blkg->refcnt); +} + +/** + * blkg_put - put a blkg reference + * @blkg: blkg to put + */ +static inline void blkg_put(struct blkcg_gq *blkg) +{ + percpu_ref_put(&blkg->refcnt); +} + +/** + * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants + * @d_blkg: loop cursor pointing to the current descendant + * @pos_css: used for iteration + * @p_blkg: target blkg to walk descendants of + * + * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU + * read locked. If called under either blkcg or queue lock, the iteration + * is guaranteed to include all and only online blkgs. 
The caller may + * update @pos_css by calling css_rightmost_descendant() to skip subtree. + * @p_blkg is included in the iteration and the first node to be visited. + */ +#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \ + css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \ + if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ + (p_blkg)->q, false))) + +/** + * blkg_for_each_descendant_post - post-order walk of a blkg's descendants + * @d_blkg: loop cursor pointing to the current descendant + * @pos_css: used for iteration + * @p_blkg: target blkg to walk descendants of + * + * Similar to blkg_for_each_descendant_pre() but performs post-order + * traversal instead. Synchronization rules are the same. @p_blkg is + * included in the iteration and the last node to be visited. + */ +#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \ + css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \ + if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ + (p_blkg)->q, false))) + +bool __blkcg_punt_bio_submit(struct bio *bio); + +static inline bool blkcg_punt_bio_submit(struct bio *bio) +{ + if (bio->bi_opf & REQ_CGROUP_PUNT) + return __blkcg_punt_bio_submit(bio); + else + return false; +} + +static inline void blkcg_bio_issue_init(struct bio *bio) +{ + bio_issue_init(&bio->bi_issue, bio_sectors(bio)); +} + +static inline void blkcg_use_delay(struct blkcg_gq *blkg) +{ + if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) + return; + if (atomic_add_return(1, &blkg->use_delay) == 1) + atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); +} + +static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) +{ + int old = atomic_read(&blkg->use_delay); + + if (WARN_ON_ONCE(old < 0)) + return 0; + if (old == 0) + return 0; + + /* + * We do this song and dance because we can race with somebody else + * adding or removing delay. If we just did an atomic_dec we'd end up + * negative and we'd already be in trouble. We need to subtract 1 and + * then check to see if we were the last delay so we can drop the + * congestion count on the cgroup. + */ + while (old) { + int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1); + if (cur == old) + break; + old = cur; + } + + if (old == 0) + return 0; + if (old == 1) + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); + return 1; +} + +/** + * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount + * @blkg: target blkg + * @delay: delay duration in nsecs + * + * When enabled with this function, the delay is not decayed and must be + * explicitly cleared with blkcg_clear_delay(). Must not be mixed with + * blkcg_[un]use_delay() and blkcg_add_delay() usages. + */ +static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay) +{ + int old = atomic_read(&blkg->use_delay); + + /* We only want 1 person setting the congestion count for this blkg. */ + if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old) + atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); + + atomic64_set(&blkg->delay_nsec, delay); +} + +/** + * blkcg_clear_delay - Disable allocator delay mechanism + * @blkg: target blkg + * + * Disable use_delay mechanism. See blkcg_set_delay(). + */ +static inline void blkcg_clear_delay(struct blkcg_gq *blkg) +{ + int old = atomic_read(&blkg->use_delay); + + /* We only want 1 person clearing the congestion count for this blkg. 
*/ + if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old) + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); +} + +void blk_cgroup_bio_start(struct bio *bio); +void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); +#else /* CONFIG_BLK_CGROUP */ + +struct blkg_policy_data { +}; + +struct blkcg_policy_data { +}; + +struct blkcg_policy { +}; + +#ifdef CONFIG_BLOCK + +static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } +static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) +{ return NULL; } +static inline int blkcg_init_queue(struct request_queue *q) { return 0; } +static inline void blkcg_exit_queue(struct request_queue *q) { } +static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; } +static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } +static inline int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol) { return 0; } +static inline void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol) { } + +static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; } + +static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, + struct blkcg_policy *pol) { return NULL; } +static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } +static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } +static inline void blkg_get(struct blkcg_gq *blkg) { } +static inline void blkg_put(struct blkcg_gq *blkg) { } + +static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } +static inline void blkcg_bio_issue_init(struct bio *bio) { } +static inline void blk_cgroup_bio_start(struct bio *bio) { } + +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) + +#endif /* CONFIG_BLOCK */ +#endif /* CONFIG_BLK_CGROUP */ + +#endif /* _BLK_CGROUP_PRIVATE_H */ diff --git a/block/blk-core.c b/block/blk-core.c index ff972b968f25..5a4a59041629 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -49,6 +48,7 @@ #include "blk.h" #include "blk-mq-sched.h" #include "blk-pm.h" +#include "blk-cgroup.h" #include "blk-throttle.h" struct dentry *blk_debugfs_root; diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index c87aba8584c6..18c8eafe20b9 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -10,7 +10,6 @@ #define pr_fmt(fmt) "blk-crypto-fallback: " fmt #include -#include #include #include #include @@ -20,6 +19,7 @@ #include #include +#include "blk-cgroup.h" #include "blk-crypto-internal.h" static unsigned int num_prealloc_bounce_pg = 32; diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 769b64394298..70a0a3d680a3 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -178,12 +178,12 @@ #include #include #include -#include #include #include #include "blk-rq-qos.h" #include "blk-stat.h" #include "blk-wbt.h" +#include "blk-cgroup.h" #ifdef CONFIG_TRACEPOINTS diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 6593c7123b97..010e658d44a8 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -74,9 +74,9 @@ #include #include #include -#include #include "blk-rq-qos.h" #include "blk-stat.h" +#include "blk-cgroup.h" #include "blk.h" #define DEFAULT_SCALE_COOKIE 1000000U diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c index 2e7f10e1c03f..79e797f5d194 100644 --- 
a/block/blk-ioprio.c +++ b/block/blk-ioprio.c @@ -12,11 +12,11 @@ * Documentation/admin-guide/cgroup-v2.rst. */ -#include #include #include #include #include +#include "blk-cgroup.h" #include "blk-ioprio.h" #include "blk-rq-qos.h" diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 9f32882ceb2f..4c6b7dff71e5 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include "blk.h" @@ -18,6 +17,7 @@ #include "blk-mq-debugfs.h" #include "blk-mq-sched.h" #include "blk-wbt.h" +#include "blk-cgroup.h" #include "blk-throttle.h" struct queue_sysfs_entry { diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 7c462c006b26..73640d80e99e 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -10,7 +10,6 @@ #include #include #include -#include #include "blk.h" #include "blk-cgroup-rwstat.h" #include "blk-stat.h" diff --git a/block/bounce.c b/block/bounce.c index 3fd3bc6fd5db..3d50d19cde72 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -24,6 +23,7 @@ #include #include "blk.h" +#include "blk-cgroup.h" #define POOL_SIZE 64 #define ISA_POOL_SIZE 16 diff --git a/block/elevator.c b/block/elevator.c index ec98aed39c4f..6847ab6e7aa5 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -35,7 +35,6 @@ #include #include #include -#include #include @@ -44,6 +43,7 @@ #include "blk-mq-sched.h" #include "blk-pm.h" #include "blk-wbt.h" +#include "blk-cgroup.h" static DEFINE_SPINLOCK(elv_list_lock); static LIST_HEAD(elv_list); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index bdc49bd4eef0..f2ad8ed8f777 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -25,12 +25,8 @@ #include #include -/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ -#define BLKG_STAT_CPU_BATCH (INT_MAX / 2) - #define FC_APPID_LEN 129 - #ifdef CONFIG_BLK_CGROUP enum blkg_iostat_type { @@ -42,6 +38,7 @@ enum blkg_iostat_type { }; struct blkcg_gq; +struct blkg_policy_data; struct blkcg { struct cgroup_subsys_state css; @@ -74,36 +71,6 @@ struct blkg_iostat_set { struct blkg_iostat last; }; -/* - * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a - * request_queue (q). This is used by blkcg policies which need to track - * information per blkcg - q pair. - * - * There can be multiple active blkcg policies and each blkg:policy pair is - * represented by a blkg_policy_data which is allocated and freed by each - * policy's pd_alloc/free_fn() methods. A policy can allocate private data - * area by allocating larger data structure which embeds blkg_policy_data - * at the beginning. - */ -struct blkg_policy_data { - /* the blkg and policy id this per-policy data belongs to */ - struct blkcg_gq *blkg; - int plid; -}; - -/* - * Policies that need to keep per-blkcg data which is independent from any - * request_queue associated to it should implement cpd_alloc/free_fn() - * methods. A policy can allocate private data area by allocating larger - * data structure which embeds blkcg_policy_data at the beginning. - * cpd_init() is invoked to let each policy handle per-blkcg data. 
- */ -struct blkcg_policy_data { - /* the blkcg and policy id this per-policy data belongs to */ - struct blkcg *blkcg; - int plid; -}; - /* association between a blk cgroup and a request queue */ struct blkcg_gq { /* Pointer to the associated request_queue */ @@ -139,120 +106,17 @@ struct blkcg_gq { struct rcu_head rcu_head; }; -typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); -typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd); -typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd); -typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd); -typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, - struct request_queue *q, struct blkcg *blkcg); -typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd); -typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); -typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); -typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); -typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); -typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, - struct seq_file *s); - -struct blkcg_policy { - int plid; - /* cgroup files for the policy */ - struct cftype *dfl_cftypes; - struct cftype *legacy_cftypes; - - /* operations */ - blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; - blkcg_pol_init_cpd_fn *cpd_init_fn; - blkcg_pol_free_cpd_fn *cpd_free_fn; - blkcg_pol_bind_cpd_fn *cpd_bind_fn; - - blkcg_pol_alloc_pd_fn *pd_alloc_fn; - blkcg_pol_init_pd_fn *pd_init_fn; - blkcg_pol_online_pd_fn *pd_online_fn; - blkcg_pol_offline_pd_fn *pd_offline_fn; - blkcg_pol_free_pd_fn *pd_free_fn; - blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; - blkcg_pol_stat_pd_fn *pd_stat_fn; -}; - -extern struct blkcg blkcg_root; extern struct cgroup_subsys_state * const blkcg_root_css; -extern bool blkcg_debug_stats; - -struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, - struct request_queue *q, bool update_hint); -int blkcg_init_queue(struct request_queue *q); -void blkcg_exit_queue(struct request_queue *q); - -/* Blkio controller policy registration */ -int blkcg_policy_register(struct blkcg_policy *pol); -void blkcg_policy_unregister(struct blkcg_policy *pol); -int blkcg_activate_policy(struct request_queue *q, - const struct blkcg_policy *pol); -void blkcg_deactivate_policy(struct request_queue *q, - const struct blkcg_policy *pol); - -const char *blkg_dev_name(struct blkcg_gq *blkg); -void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, - u64 (*prfill)(struct seq_file *, - struct blkg_policy_data *, int), - const struct blkcg_policy *pol, int data, - bool show_total); -u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); - -struct blkg_conf_ctx { - struct block_device *bdev; - struct blkcg_gq *blkg; - char *body; -}; - -struct block_device *blkcg_conf_open_bdev(char **inputp); -int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, - char *input, struct blkg_conf_ctx *ctx); -void blkg_conf_finish(struct blkg_conf_ctx *ctx); -/** - * blkcg_css - find the current css - * - * Find the css associated with either the kthread or the current task. - * This may return a dying css, so it is up to the caller to use tryget logic - * to confirm it is alive and well. 
- */ -static inline struct cgroup_subsys_state *blkcg_css(void) -{ - struct cgroup_subsys_state *css; - - css = kthread_blkcg(); - if (css) - return css; - return task_css(current, io_cgrp_id); -} +void blkcg_destroy_blkgs(struct blkcg *blkcg); +void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); +void blkcg_maybe_throttle_current(void); static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) { return css ? container_of(css, struct blkcg, css) : NULL; } -/** - * __bio_blkcg - internal, inconsistent version to get blkcg - * - * DO NOT USE. - * This function is inconsistent and consequently is dangerous to use. The - * first part of the function returns a blkcg where a reference is owned by the - * bio. This means it does not need to be rcu protected as it cannot go away - * with the bio owning a reference to it. However, the latter potentially gets - * it from task_css(). This can race against task migration and the cgroup - * dying. It is also semantically different as it must be called rcu protected - * and is susceptible to failure when trying to get a reference to it. - * Therefore, it is not ok to assume that *_get() will always succeed on the - * blkcg returned here. - */ -static inline struct blkcg *__bio_blkcg(struct bio *bio) -{ - if (bio && bio->bi_blkg) - return bio->bi_blkg->blkcg; - return css_to_blkcg(blkcg_css()); -} - /** * bio_blkcg - grab the blkcg associated with a bio * @bio: target bio @@ -288,22 +152,6 @@ static inline bool blk_cgroup_congested(void) return ret; } -/** - * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg - * @return: true if this bio needs to be submitted with the root blkg context. - * - * In order to avoid priority inversions we sometimes need to issue a bio as if - * it were attached to the root blkg, and then backcharge to the actual owning - * blkg. The idea is we do bio_blkcg() to look up the actual context for the - * bio and attach the appropriate blkg to the bio. Then we call this helper and - * if it is true run with the root blkg for that queue and then do any - * backcharging to the originating cgroup once the io is complete. - */ -static inline bool bio_issue_as_root_blkg(struct bio *bio) -{ - return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0; -} - /** * blkcg_parent - get the parent of a blkcg * @blkcg: blkcg of interest @@ -315,96 +163,6 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg) return css_to_blkcg(blkcg->css.parent); } -/** - * __blkg_lookup - internal version of blkg_lookup() - * @blkcg: blkcg of interest - * @q: request_queue of interest - * @update_hint: whether to update lookup hint with the result or not - * - * This is internal version and shouldn't be used by policy - * implementations. Looks up blkgs for the @blkcg - @q pair regardless of - * @q's bypass state. If @update_hint is %true, the caller should be - * holding @q->queue_lock and lookup hint is updated on success. - */ -static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, - struct request_queue *q, - bool update_hint) -{ - struct blkcg_gq *blkg; - - if (blkcg == &blkcg_root) - return q->root_blkg; - - blkg = rcu_dereference(blkcg->blkg_hint); - if (blkg && blkg->q == q) - return blkg; - - return blkg_lookup_slowpath(blkcg, q, update_hint); -} - -/** - * blkg_lookup - lookup blkg for the specified blkcg - q pair - * @blkcg: blkcg of interest - * @q: request_queue of interest - * - * Lookup blkg for the @blkcg - @q pair. 
This function should be called - * under RCU read lock. - */ -static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, - struct request_queue *q) -{ - WARN_ON_ONCE(!rcu_read_lock_held()); - return __blkg_lookup(blkcg, q, false); -} - -/** - * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair - * @q: request_queue of interest - * - * Lookup blkg for @q at the root level. See also blkg_lookup(). - */ -static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) -{ - return q->root_blkg; -} - -/** - * blkg_to_pdata - get policy private data - * @blkg: blkg of interest - * @pol: policy of interest - * - * Return pointer to private data associated with the @blkg-@pol pair. - */ -static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, - struct blkcg_policy *pol) -{ - return blkg ? blkg->pd[pol->plid] : NULL; -} - -static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, - struct blkcg_policy *pol) -{ - return blkcg ? blkcg->cpd[pol->plid] : NULL; -} - -/** - * pdata_to_blkg - get blkg associated with policy private data - * @pd: policy private data of interest - * - * @pd is policy private data. Determine the blkg it's associated with. - */ -static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) -{ - return pd ? pd->blkg : NULL; -} - -static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd) -{ - return cpd ? cpd->blkcg : NULL; -} - -extern void blkcg_destroy_blkgs(struct blkcg *blkcg); - /** * blkcg_pin_online - pin online state * @blkcg: blkcg of interest @@ -437,231 +195,24 @@ static inline void blkcg_unpin_online(struct blkcg *blkcg) } while (blkcg); } -/** - * blkg_path - format cgroup path of blkg - * @blkg: blkg of interest - * @buf: target buffer - * @buflen: target buffer length - * - * Format the path of the cgroup of @blkg into @buf. - */ -static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) -{ - return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen); -} - -/** - * blkg_get - get a blkg reference - * @blkg: blkg to get - * - * The caller should be holding an existing reference. - */ -static inline void blkg_get(struct blkcg_gq *blkg) -{ - percpu_ref_get(&blkg->refcnt); -} - -/** - * blkg_tryget - try and get a blkg reference - * @blkg: blkg to get - * - * This is for use when doing an RCU lookup of the blkg. We may be in the midst - * of freeing this blkg, so we can only use it if the refcnt is not zero. - */ -static inline bool blkg_tryget(struct blkcg_gq *blkg) -{ - return blkg && percpu_ref_tryget(&blkg->refcnt); -} - -/** - * blkg_put - put a blkg reference - * @blkg: blkg to put - */ -static inline void blkg_put(struct blkcg_gq *blkg) -{ - percpu_ref_put(&blkg->refcnt); -} - -/** - * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants - * @d_blkg: loop cursor pointing to the current descendant - * @pos_css: used for iteration - * @p_blkg: target blkg to walk descendants of - * - * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU - * read locked. If called under either blkcg or queue lock, the iteration - * is guaranteed to include all and only online blkgs. The caller may - * update @pos_css by calling css_rightmost_descendant() to skip subtree. - * @p_blkg is included in the iteration and the first node to be visited. 
- */ -#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \ - css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \ - if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ - (p_blkg)->q, false))) - -/** - * blkg_for_each_descendant_post - post-order walk of a blkg's descendants - * @d_blkg: loop cursor pointing to the current descendant - * @pos_css: used for iteration - * @p_blkg: target blkg to walk descendants of - * - * Similar to blkg_for_each_descendant_pre() but performs post-order - * traversal instead. Synchronization rules are the same. @p_blkg is - * included in the iteration and the last node to be visited. - */ -#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \ - css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \ - if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ - (p_blkg)->q, false))) - -bool __blkcg_punt_bio_submit(struct bio *bio); - -static inline bool blkcg_punt_bio_submit(struct bio *bio) -{ - if (bio->bi_opf & REQ_CGROUP_PUNT) - return __blkcg_punt_bio_submit(bio); - else - return false; -} - -static inline void blkcg_bio_issue_init(struct bio *bio) -{ - bio_issue_init(&bio->bi_issue, bio_sectors(bio)); -} - -static inline void blkcg_use_delay(struct blkcg_gq *blkg) -{ - if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) - return; - if (atomic_add_return(1, &blkg->use_delay) == 1) - atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); -} - -static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) -{ - int old = atomic_read(&blkg->use_delay); - - if (WARN_ON_ONCE(old < 0)) - return 0; - if (old == 0) - return 0; - - /* - * We do this song and dance because we can race with somebody else - * adding or removing delay. If we just did an atomic_dec we'd end up - * negative and we'd already be in trouble. We need to subtract 1 and - * then check to see if we were the last delay so we can drop the - * congestion count on the cgroup. - */ - while (old) { - int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1); - if (cur == old) - break; - old = cur; - } - - if (old == 0) - return 0; - if (old == 1) - atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); - return 1; -} - -/** - * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount - * @blkg: target blkg - * @delay: delay duration in nsecs - * - * When enabled with this function, the delay is not decayed and must be - * explicitly cleared with blkcg_clear_delay(). Must not be mixed with - * blkcg_[un]use_delay() and blkcg_add_delay() usages. - */ -static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay) -{ - int old = atomic_read(&blkg->use_delay); - - /* We only want 1 person setting the congestion count for this blkg. */ - if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old) - atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); - - atomic64_set(&blkg->delay_nsec, delay); -} - -/** - * blkcg_clear_delay - Disable allocator delay mechanism - * @blkg: target blkg - * - * Disable use_delay mechanism. See blkcg_set_delay(). - */ -static inline void blkcg_clear_delay(struct blkcg_gq *blkg) -{ - int old = atomic_read(&blkg->use_delay); - - /* We only want 1 person clearing the congestion count for this blkg. 
*/ - if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old) - atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); -} - -void blk_cgroup_bio_start(struct bio *bio); -void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); -void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); -void blkcg_maybe_throttle_current(void); #else /* CONFIG_BLK_CGROUP */ struct blkcg { }; -struct blkg_policy_data { -}; - -struct blkcg_policy_data { -}; - struct blkcg_gq { }; -struct blkcg_policy { -}; - #define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL)) static inline void blkcg_maybe_throttle_current(void) { } static inline bool blk_cgroup_congested(void) { return false; } #ifdef CONFIG_BLOCK - static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { } - -static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } -static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) -{ return NULL; } -static inline int blkcg_init_queue(struct request_queue *q) { return 0; } -static inline void blkcg_exit_queue(struct request_queue *q) { } -static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; } -static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } -static inline int blkcg_activate_policy(struct request_queue *q, - const struct blkcg_policy *pol) { return 0; } -static inline void blkcg_deactivate_policy(struct request_queue *q, - const struct blkcg_policy *pol) { } - -static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } +#endif /* CONFIG_BLOCK */ -static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, - struct blkcg_policy *pol) { return NULL; } -static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } -static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } -static inline void blkg_get(struct blkcg_gq *blkg) { } -static inline void blkg_put(struct blkcg_gq *blkg) { } - -static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } -static inline void blkcg_bio_issue_init(struct bio *bio) { } -static inline void blk_cgroup_bio_start(struct bio *bio) { } - -#define blk_queue_for_each_rl(rl, q) \ - for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) - -#endif /* CONFIG_BLOCK */ #endif /* CONFIG_BLK_CGROUP */ #ifdef CONFIG_BLK_CGROUP_FC_APPID -- cgit v1.2.3-71-gd317 From a8abb0c3dc1e28454851a00f8b7333d9695d566c Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Wed, 9 Feb 2022 12:33:23 +0530 Subject: bpf: Fix crash due to incorrect copy_map_value When both bpf_spin_lock and bpf_timer are present in a BPF map value, copy_map_value needs to skirt both objects when copying a value into and out of the map. However, the current code does not set both s_off and t_off in copy_map_value, which leads to a crash when e.g. bpf_spin_lock is placed in map value with bpf_timer, as bpf_map_update_elem call will be able to overwrite the other timer object. When the issue is not fixed, an overwriting can produce the following splat: [root@(none) bpf]# ./test_progs -t timer_crash [ 15.930339] bpf_testmod: loading out-of-tree module taints kernel. 
[ 16.037849] ================================================================== [ 16.038458] BUG: KASAN: user-memory-access in __pv_queued_spin_lock_slowpath+0x32b/0x520 [ 16.038944] Write of size 8 at addr 0000000000043ec0 by task test_progs/325 [ 16.039399] [ 16.039514] CPU: 0 PID: 325 Comm: test_progs Tainted: G OE 5.16.0+ #278 [ 16.039983] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ArchLinux 1.15.0-1 04/01/2014 [ 16.040485] Call Trace: [ 16.040645] [ 16.040805] dump_stack_lvl+0x59/0x73 [ 16.041069] ? __pv_queued_spin_lock_slowpath+0x32b/0x520 [ 16.041427] kasan_report.cold+0x116/0x11b [ 16.041673] ? __pv_queued_spin_lock_slowpath+0x32b/0x520 [ 16.042040] __pv_queued_spin_lock_slowpath+0x32b/0x520 [ 16.042328] ? memcpy+0x39/0x60 [ 16.042552] ? pv_hash+0xd0/0xd0 [ 16.042785] ? lockdep_hardirqs_off+0x95/0xd0 [ 16.043079] __bpf_spin_lock_irqsave+0xdf/0xf0 [ 16.043366] ? bpf_get_current_comm+0x50/0x50 [ 16.043608] ? jhash+0x11a/0x270 [ 16.043848] bpf_timer_cancel+0x34/0xe0 [ 16.044119] bpf_prog_c4ea1c0f7449940d_sys_enter+0x7c/0x81 [ 16.044500] bpf_trampoline_6442477838_0+0x36/0x1000 [ 16.044836] __x64_sys_nanosleep+0x5/0x140 [ 16.045119] do_syscall_64+0x59/0x80 [ 16.045377] ? lock_is_held_type+0xe4/0x140 [ 16.045670] ? irqentry_exit_to_user_mode+0xa/0x40 [ 16.046001] ? mark_held_locks+0x24/0x90 [ 16.046287] ? asm_exc_page_fault+0x1e/0x30 [ 16.046569] ? asm_exc_page_fault+0x8/0x30 [ 16.046851] ? lockdep_hardirqs_on+0x7e/0x100 [ 16.047137] entry_SYSCALL_64_after_hwframe+0x44/0xae [ 16.047405] RIP: 0033:0x7f9e4831718d [ 16.047602] Code: b4 0c 00 0f 05 eb a9 66 0f 1f 44 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d b3 6c 0c 00 f7 d8 64 89 01 48 [ 16.048764] RSP: 002b:00007fff488086b8 EFLAGS: 00000206 ORIG_RAX: 0000000000000023 [ 16.049275] RAX: ffffffffffffffda RBX: 00007f9e48683740 RCX: 00007f9e4831718d [ 16.049747] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 00007fff488086d0 [ 16.050225] RBP: 00007fff488086f0 R08: 00007fff488085d7 R09: 00007f9e4cb594a0 [ 16.050648] R10: 0000000000000000 R11: 0000000000000206 R12: 00007f9e484cde30 [ 16.051124] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 [ 16.051608] [ 16.051762] ================================================================== Fixes: 68134668c17f ("bpf: Add map side support for bpf timers.") Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220209070324.1093182-2-memxor@gmail.com --- include/linux/bpf.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index fa517ae604ad..31a83449808b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -224,7 +224,8 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) if (unlikely(map_value_has_spin_lock(map))) { s_off = map->spin_lock_off; s_sz = sizeof(struct bpf_spin_lock); - } else if (unlikely(map_value_has_timer(map))) { + } + if (unlikely(map_value_has_timer(map))) { t_off = map->timer_off; t_sz = sizeof(struct bpf_timer); } -- cgit v1.2.3-71-gd317 From 5eaed6eedbe9612f642ad2b880f961d1c6c8ec2b Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 11 Feb 2022 11:49:53 -0800 Subject: bpf: Fix a bpf_timer initialization issue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The patch in [1] intends to fix a bpf_timer related issue, but the fix caused 
the existing 'timer' selftest to fail with a hang or random errors. After some debugging, I found an issue with check_and_init_map_value() in hashtab.c. More specifically, in hashtab.c we have the code l_new = bpf_map_kmalloc_node(&htab->map, ...) check_and_init_map_value(&htab->map, l_new...) Note that bpf_map_kmalloc_node() does not do initialization, so l_new contains random data. The function check_and_init_map_value() intends to zero the bpf_spin_lock and bpf_timer if they exist in the map. But I found that bpf_spin_lock is zero'ed while bpf_timer is not. With [1], copy_map_value() later skips copying of bpf_spin_lock and bpf_timer. The non-zero bpf_timer caused random failures for the 'timer' selftest. Without [1], for both the bpf_spin_lock and bpf_timer case, bpf_timer will be zero'ed, so the 'timer' selftest is okay. So why does check_and_init_map_value() zero bpf_spin_lock properly but not bpf_timer? In the bpf uapi header, we have struct bpf_spin_lock { __u32 val; }; struct bpf_timer { __u64 :64; __u64 :64; } __attribute__((aligned(8))); The initialization code: *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = (struct bpf_spin_lock){}; *(struct bpf_timer *)(dst + map->timer_off) = (struct bpf_timer){}; It appears the compiler has no obligation to initialize anonymous fields. For example, let us use clang with the bpf target as below: $ cat t.c struct bpf_timer { unsigned long long :64; }; struct bpf_timer2 { unsigned long long a; }; void test(struct bpf_timer *t) { *t = (struct bpf_timer){}; } void test2(struct bpf_timer2 *t) { *t = (struct bpf_timer2){}; } $ clang -target bpf -O2 -c -g t.c $ llvm-objdump -d t.o ... 0000000000000000 <test>: 0: 95 00 00 00 00 00 00 00 exit 0000000000000008 <test2>: 1: b7 02 00 00 00 00 00 00 r2 = 0 2: 7b 21 00 00 00 00 00 00 *(u64 *)(r1 + 0) = r2 3: 95 00 00 00 00 00 00 00 exit gcc11.2 does not have the above issue. But from INTERNATIONAL STANDARD ©ISO/IEC ISO/IEC 9899:201x Programming languages — C http://www.open-std.org/Jtc1/sc22/wg14/www/docs/n1547.pdf page 157: Except where explicitly stated otherwise, for the purposes of this subclause unnamed members of objects of structure and union type do not participate in initialization. Unnamed members of structure objects have indeterminate value even after initialization. To fix the problem, let us use memset for the bpf_timer case in check_and_init_map_value(). For consistency, memset is also used for the bpf_spin_lock case. [1] https://lore.kernel.org/bpf/20220209070324.1093182-2-memxor@gmail.com/ Fixes: 68134668c17f3 ("bpf: Add map side support for bpf timers.") Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220211194953.3142152-1-yhs@fb.com --- include/linux/bpf.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 31a83449808b..d0ad379d1e62 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -209,11 +209,9 @@ static inline bool map_value_has_timer(const struct bpf_map *map) static inline void check_and_init_map_value(struct bpf_map *map, void *dst) { if (unlikely(map_value_has_spin_lock(map))) - *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = - (struct bpf_spin_lock){}; + memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock)); if (unlikely(map_value_has_timer(map))) - *(struct bpf_timer *)(dst + map->timer_off) = - (struct bpf_timer){}; + memset(dst + map->timer_off, 0, sizeof(struct bpf_timer)); } /* copy everything but bpf_spin_lock and bpf_timer.
There could be one of each. */ -- cgit v1.2.3-71-gd317 From e496132ebedd870b67f1f6d2428f9bb9d7ae27fd Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 8 Feb 2022 09:43:34 +0000 Subject: sched/fair: Adjust the allowed NUMA imbalance when SD_NUMA spans multiple LLCs Commit 7d2b5dd0bcc4 ("sched/numa: Allow a floating imbalance between NUMA nodes") allowed an imbalance between NUMA nodes such that communicating tasks would not be pulled apart by the load balancer. This works fine when there is a 1:1 relationship between LLC and node but can be suboptimal for multiple LLCs if independent tasks prematurely use CPUs sharing cache. Zen* has multiple LLCs per node with local memory channels and due to the allowed imbalance, it's far harder to tune some workloads to run optimally than it is on hardware that has 1 LLC per node. This patch allows an imbalance to exist up to the point where LLCs should be balanced between nodes. On a Zen3 machine running STREAM parallelised with OMP to have one instance per LLC and without binding, the results are 5.17.0-rc0 5.17.0-rc0 vanilla sched-numaimb-v6 MB/sec copy-16 162596.94 ( 0.00%) 580559.74 ( 257.05%) MB/sec scale-16 136901.28 ( 0.00%) 374450.52 ( 173.52%) MB/sec add-16 157300.70 ( 0.00%) 564113.76 ( 258.62%) MB/sec triad-16 151446.88 ( 0.00%) 564304.24 ( 272.61%) STREAM can use directives to force the spread if the OpenMP runtime is new enough, but that doesn't help if an application uses threads and it's not known in advance how many threads will be created. Coremark is a CPU- and cache-intensive benchmark parallelised with threads. When running with 1 thread per core, the vanilla kernel allows threads to contend on cache. With the patch: 5.17.0-rc0 5.17.0-rc0 vanilla sched-numaimb-v5 Min Score-16 368239.36 ( 0.00%) 389816.06 ( 5.86%) Hmean Score-16 388607.33 ( 0.00%) 427877.08 * 10.11%* Max Score-16 408945.69 ( 0.00%) 481022.17 ( 17.62%) Stddev Score-16 15247.04 ( 0.00%) 24966.82 ( -63.75%) CoeffVar Score-16 3.92 ( 0.00%) 5.82 ( -48.48%) It can also make a big difference for semi-realistic workloads like specjbb, which can execute arbitrary numbers of threads without advance knowledge of how they should be placed. Even in cases where the average performance is neutral, the results are more stable.
5.17.0-rc0 5.17.0-rc0 vanilla sched-numaimb-v6 Hmean tput-1 71631.55 ( 0.00%) 73065.57 ( 2.00%) Hmean tput-8 582758.78 ( 0.00%) 556777.23 ( -4.46%) Hmean tput-16 1020372.75 ( 0.00%) 1009995.26 ( -1.02%) Hmean tput-24 1416430.67 ( 0.00%) 1398700.11 ( -1.25%) Hmean tput-32 1687702.72 ( 0.00%) 1671357.04 ( -0.97%) Hmean tput-40 1798094.90 ( 0.00%) 2015616.46 * 12.10%* Hmean tput-48 1972731.77 ( 0.00%) 2333233.72 ( 18.27%) Hmean tput-56 2386872.38 ( 0.00%) 2759483.38 ( 15.61%) Hmean tput-64 2909475.33 ( 0.00%) 2925074.69 ( 0.54%) Hmean tput-72 2585071.36 ( 0.00%) 2962443.97 ( 14.60%) Hmean tput-80 2994387.24 ( 0.00%) 3015980.59 ( 0.72%) Hmean tput-88 3061408.57 ( 0.00%) 3010296.16 ( -1.67%) Hmean tput-96 3052394.82 ( 0.00%) 2784743.41 ( -8.77%) Hmean tput-104 2997814.76 ( 0.00%) 2758184.50 ( -7.99%) Hmean tput-112 2955353.29 ( 0.00%) 2859705.09 ( -3.24%) Hmean tput-120 2889770.71 ( 0.00%) 2764478.46 ( -4.34%) Hmean tput-128 2871713.84 ( 0.00%) 2750136.73 ( -4.23%) Stddev tput-1 5325.93 ( 0.00%) 2002.53 ( 62.40%) Stddev tput-8 6630.54 ( 0.00%) 10905.00 ( -64.47%) Stddev tput-16 25608.58 ( 0.00%) 6851.16 ( 73.25%) Stddev tput-24 12117.69 ( 0.00%) 4227.79 ( 65.11%) Stddev tput-32 27577.16 ( 0.00%) 8761.05 ( 68.23%) Stddev tput-40 59505.86 ( 0.00%) 2048.49 ( 96.56%) Stddev tput-48 168330.30 ( 0.00%) 93058.08 ( 44.72%) Stddev tput-56 219540.39 ( 0.00%) 30687.02 ( 86.02%) Stddev tput-64 121750.35 ( 0.00%) 9617.36 ( 92.10%) Stddev tput-72 223387.05 ( 0.00%) 34081.13 ( 84.74%) Stddev tput-80 128198.46 ( 0.00%) 22565.19 ( 82.40%) Stddev tput-88 136665.36 ( 0.00%) 27905.97 ( 79.58%) Stddev tput-96 111925.81 ( 0.00%) 99615.79 ( 11.00%) Stddev tput-104 146455.96 ( 0.00%) 28861.98 ( 80.29%) Stddev tput-112 88740.49 ( 0.00%) 58288.23 ( 34.32%) Stddev tput-120 186384.86 ( 0.00%) 45812.03 ( 75.42%) Stddev tput-128 78761.09 ( 0.00%) 57418.48 ( 27.10%) Similarly, for embarassingly parallel problems like NPB-ep, there are improvements due to better spreading across LLC when the machine is not fully utilised. vanilla sched-numaimb-v6 Min ep.D 31.79 ( 0.00%) 26.11 ( 17.87%) Amean ep.D 31.86 ( 0.00%) 26.17 * 17.86%* Stddev ep.D 0.07 ( 0.00%) 0.05 ( 24.41%) CoeffVar ep.D 0.22 ( 0.00%) 0.20 ( 7.97%) Max ep.D 31.93 ( 0.00%) 26.21 ( 17.91%) Signed-off-by: Mel Gorman Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Gautham R. 
Shenoy Tested-by: K Prateek Nayak Link: https://lore.kernel.org/r/20220208094334.16379-3-mgorman@techsingularity.net --- include/linux/sched/topology.h | 1 + kernel/sched/fair.c | 22 ++++++++++-------- kernel/sched/topology.c | 53 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 8054641c0a7b..56cffe42abbc 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -93,6 +93,7 @@ struct sched_domain { unsigned int busy_factor; /* less balancing by factor if busy */ unsigned int imbalance_pct; /* No balance until over watermark */ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int imb_numa_nr; /* Nr running tasks that allows a NUMA imbalance */ int nohz_idle; /* NOHZ IDLE status */ int flags; /* See SD_* */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ea710168ae91..5c4bfffe8c2c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1489,6 +1489,7 @@ struct task_numa_env { int src_cpu, src_nid; int dst_cpu, dst_nid; + int imb_numa_nr; struct numa_stats src_stats, dst_stats; @@ -1503,7 +1504,7 @@ struct task_numa_env { static unsigned long cpu_load(struct rq *rq); static unsigned long cpu_runnable(struct rq *rq); static inline long adjust_numa_imbalance(int imbalance, - int dst_running, int dst_weight); + int dst_running, int imb_numa_nr); static inline enum numa_type numa_classify(unsigned int imbalance_pct, @@ -1884,7 +1885,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, dst_running = env->dst_stats.nr_running + 1; imbalance = max(0, dst_running - src_running); imbalance = adjust_numa_imbalance(imbalance, dst_running, - env->dst_stats.weight); + env->imb_numa_nr); /* Use idle CPU if there is no imbalance */ if (!imbalance) { @@ -1949,8 +1950,10 @@ static int task_numa_migrate(struct task_struct *p) */ rcu_read_lock(); sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); - if (sd) + if (sd) { env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; + env.imb_numa_nr = sd->imb_numa_nr; + } rcu_read_unlock(); /* @@ -9005,10 +9008,9 @@ static bool update_pick_idlest(struct sched_group *idlest, * This is an approximation as the number of running tasks may not be * related to the number of busy CPUs due to sched_setaffinity. */ -static inline bool -allow_numa_imbalance(unsigned int running, unsigned int weight) +static inline bool allow_numa_imbalance(int running, int imb_numa_nr) { - return (running < (weight >> 2)); + return running <= imb_numa_nr; } /* @@ -9148,7 +9150,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) * allowed. If there is a real need of migration, * periodic load balance will take care of it. 
*/ - if (allow_numa_imbalance(local_sgs.sum_nr_running + 1, local_sgs.group_weight)) + if (allow_numa_imbalance(local_sgs.sum_nr_running + 1, sd->imb_numa_nr)) return NULL; } @@ -9240,9 +9242,9 @@ next_group: #define NUMA_IMBALANCE_MIN 2 static inline long adjust_numa_imbalance(int imbalance, - int dst_running, int dst_weight) + int dst_running, int imb_numa_nr) { - if (!allow_numa_imbalance(dst_running, dst_weight)) + if (!allow_numa_imbalance(dst_running, imb_numa_nr)) return imbalance; /* @@ -9354,7 +9356,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s /* Consider allowing a small imbalance between NUMA groups */ if (env->sd->flags & SD_NUMA) { env->imbalance = adjust_numa_imbalance(env->imbalance, - local->sum_nr_running + 1, local->group_weight); + local->sum_nr_running + 1, env->sd->imb_numa_nr); } return; diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index d201a7052a29..e6cd55951304 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -2242,6 +2242,59 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att } } + /* + * Calculate an allowed NUMA imbalance such that LLCs do not get + * imbalanced. + */ + for_each_cpu(i, cpu_map) { + unsigned int imb = 0; + unsigned int imb_span = 1; + + for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { + struct sched_domain *child = sd->child; + + if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child && + (child->flags & SD_SHARE_PKG_RESOURCES)) { + struct sched_domain *top, *top_p; + unsigned int nr_llcs; + + /* + * For a single LLC per node, allow an + * imbalance up to 25% of the node. This is an + * arbitrary cutoff based on SMT-2 to balance + * between memory bandwidth and avoiding + * premature sharing of HT resources and SMT-4 + * or SMT-8 *may* benefit from a different + * cutoff. + * + * For multiple LLCs, allow an imbalance + * until multiple tasks would share an LLC + * on one node while LLCs on another node + * remain idle. + */ + nr_llcs = sd->span_weight / child->span_weight; + if (nr_llcs == 1) + imb = sd->span_weight >> 2; + else + imb = nr_llcs; + sd->imb_numa_nr = imb; + + /* Set span based on the first NUMA domain. */ + top = sd; + top_p = top->parent; + while (top_p && !(top_p->flags & SD_NUMA)) { + top = top->parent; + top_p = top->parent; + } + imb_span = top_p ? top_p->span_weight : sd->span_weight; + } else { + int factor = max(1U, (sd->span_weight / imb_span)); + + sd->imb_numa_nr = imb * factor; + } + } + } + /* Calculate CPU capacity for physical packages and nodes */ for (i = nr_cpumask_bits-1; i >= 0; i--) { if (!cpumask_test_cpu(i, cpu_map)) -- cgit v1.2.3-71-gd317 From 0764db9b49c932b89ee4d9e3236dff4bb07b4a66 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 11 Feb 2022 16:32:32 -0800 Subject: mm: memcg: synchronize objcg lists with a dedicated spinlock Alexander reported a circular lock dependency revealed by the mmap1 ltp test: LOCKDEP_CIRCULAR (suite: ltp, case: mtest06 (mmap1)) WARNING: possible circular locking dependency detected 5.17.0-20220113.rc0.git0.f2211f194038.300.fc35.s390x+debug #1 Not tainted ------------------------------------------------------ mmap1/202299 is trying to acquire lock: 00000001892c0188 (css_set_lock){..-.}-{2:2}, at: obj_cgroup_release+0x4a/0xe0 but task is already holding lock: 00000000ca3b3818 (&sighand->siglock){-.-.}-{2:2}, at: force_sig_info_to_task+0x38/0x180 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (&sighand->siglock){-.-.}-{2:2}: __lock_acquire+0x604/0xbd8 lock_acquire.part.0+0xe2/0x238 lock_acquire+0xb0/0x200 _raw_spin_lock_irqsave+0x6a/0xd8 __lock_task_sighand+0x90/0x190 cgroup_freeze_task+0x2e/0x90 cgroup_migrate_execute+0x11c/0x608 cgroup_update_dfl_csses+0x246/0x270 cgroup_subtree_control_write+0x238/0x518 kernfs_fop_write_iter+0x13e/0x1e0 new_sync_write+0x100/0x190 vfs_write+0x22c/0x2d8 ksys_write+0x6c/0xf8 __do_syscall+0x1da/0x208 system_call+0x82/0xb0 -> #0 (css_set_lock){..-.}-{2:2}: check_prev_add+0xe0/0xed8 validate_chain+0x736/0xb20 __lock_acquire+0x604/0xbd8 lock_acquire.part.0+0xe2/0x238 lock_acquire+0xb0/0x200 _raw_spin_lock_irqsave+0x6a/0xd8 obj_cgroup_release+0x4a/0xe0 percpu_ref_put_many.constprop.0+0x150/0x168 drain_obj_stock+0x94/0xe8 refill_obj_stock+0x94/0x278 obj_cgroup_charge+0x164/0x1d8 kmem_cache_alloc+0xac/0x528 __sigqueue_alloc+0x150/0x308 __send_signal+0x260/0x550 send_signal+0x7e/0x348 force_sig_info_to_task+0x104/0x180 force_sig_fault+0x48/0x58 __do_pgm_check+0x120/0x1f0 pgm_check_handler+0x11e/0x180 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&sighand->siglock); lock(css_set_lock); lock(&sighand->siglock); lock(css_set_lock); *** DEADLOCK *** 2 locks held by mmap1/202299: #0: 00000000ca3b3818 (&sighand->siglock){-.-.}-{2:2}, at: force_sig_info_to_task+0x38/0x180 #1: 00000001892ad560 (rcu_read_lock){....}-{1:2}, at: percpu_ref_put_many.constprop.0+0x0/0x168 stack backtrace: CPU: 15 PID: 202299 Comm: mmap1 Not tainted 5.17.0-20220113.rc0.git0.f2211f194038.300.fc35.s390x+debug #1 Hardware name: IBM 3906 M04 704 (LPAR) Call Trace: dump_stack_lvl+0x76/0x98 check_noncircular+0x136/0x158 check_prev_add+0xe0/0xed8 validate_chain+0x736/0xb20 __lock_acquire+0x604/0xbd8 lock_acquire.part.0+0xe2/0x238 lock_acquire+0xb0/0x200 _raw_spin_lock_irqsave+0x6a/0xd8 obj_cgroup_release+0x4a/0xe0 percpu_ref_put_many.constprop.0+0x150/0x168 drain_obj_stock+0x94/0xe8 refill_obj_stock+0x94/0x278 obj_cgroup_charge+0x164/0x1d8 kmem_cache_alloc+0xac/0x528 __sigqueue_alloc+0x150/0x308 __send_signal+0x260/0x550 send_signal+0x7e/0x348 force_sig_info_to_task+0x104/0x180 force_sig_fault+0x48/0x58 __do_pgm_check+0x120/0x1f0 pgm_check_handler+0x11e/0x180 INFO: lockdep is turned off. In this example, a slab allocation from __send_signal() caused a refill and drain of a percpu objcg stock, which resulted in the release of another, unrelated objcg. The objcg release path requires taking css_set_lock, which is used to synchronize objcg lists. This can create a circular dependency with the sighand lock, which the freezer code takes (to freeze a task) while already holding css_set_lock. In general, using css_set_lock to synchronize objcg lists makes any slab allocation or deallocation performed while css_set_lock (or any lock nested inside it) is held risky. To fix the problem and make the code more robust, let's stop using css_set_lock to synchronize objcg lists and use a new dedicated spinlock instead.
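The fix itself is small: give the objcg lists their own dedicated lock instead of reusing css_set_lock, so the release path no longer drags css_set_lock into arbitrary allocation contexts. A minimal sketch of the pattern (the lock name matches the patch below; the helper is illustrative, the patch open-codes the locking):

static DEFINE_SPINLOCK(objcg_lock);	/* protects only the objcg lists */

static void objcg_list_del(struct obj_cgroup *objcg)
{
	unsigned long flags;

	/* irqsave: obj_cgroup_release() can run from any context */
	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);
}

Because objcg_lock is a leaf lock (nothing else is acquired while holding it), it cannot re-enter the css_set_lock -> siglock dependency chain shown above.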
Link: https://lkml.kernel.org/r/Yfm1IHmoGdyUR81T@carbon.dhcp.thefacebook.com Fixes: bf4f059954dc ("mm: memcg/slab: obj_cgroup API") Signed-off-by: Roman Gushchin Reported-by: Alexander Egorenkov Tested-by: Alexander Egorenkov Reviewed-by: Waiman Long Acked-by: Tejun Heo Reviewed-by: Shakeel Butt Reviewed-by: Jeremy Linton Tested-by: Jeremy Linton Cc: Johannes Weiner Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 5 +++-- mm/memcontrol.c | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b72d75141e12..0abbd685703b 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -219,7 +219,7 @@ struct obj_cgroup { struct mem_cgroup *memcg; atomic_t nr_charged_bytes; union { - struct list_head list; + struct list_head list; /* protected by objcg_lock */ struct rcu_head rcu; }; }; @@ -315,7 +315,8 @@ struct mem_cgroup { #ifdef CONFIG_MEMCG_KMEM int kmemcg_id; struct obj_cgroup __rcu *objcg; - struct list_head objcg_list; /* list of inherited objcgs */ + /* list of inherited objcgs, protected by objcg_lock */ + struct list_head objcg_list; #endif MEMCG_PADDING(_pad2_); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 09d342c7cbd0..36e9f38c919d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -254,7 +254,7 @@ struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr) } #ifdef CONFIG_MEMCG_KMEM -extern spinlock_t css_set_lock; +static DEFINE_SPINLOCK(objcg_lock); bool mem_cgroup_kmem_disabled(void) { @@ -298,9 +298,9 @@ static void obj_cgroup_release(struct percpu_ref *ref) if (nr_pages) obj_cgroup_uncharge_pages(objcg, nr_pages); - spin_lock_irqsave(&css_set_lock, flags); + spin_lock_irqsave(&objcg_lock, flags); list_del(&objcg->list); - spin_unlock_irqrestore(&css_set_lock, flags); + spin_unlock_irqrestore(&objcg_lock, flags); percpu_ref_exit(ref); kfree_rcu(objcg, rcu); @@ -332,7 +332,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg, objcg = rcu_replace_pointer(memcg->objcg, NULL, true); - spin_lock_irq(&css_set_lock); + spin_lock_irq(&objcg_lock); /* 1) Ready to reparent active objcg. */ list_add(&objcg->list, &memcg->objcg_list); @@ -342,7 +342,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg, /* 3) Move already reparented objcgs to the parent's list */ list_splice(&memcg->objcg_list, &parent->objcg_list); - spin_unlock_irq(&css_set_lock); + spin_unlock_irq(&objcg_lock); percpu_ref_kill(&objcg->refcnt); } -- cgit v1.2.3-71-gd317 From 8913c61001482378d4ed8cc577b17c1ba3e847e4 Mon Sep 17 00:00:00 2001 From: Peng Liu Date: Fri, 11 Feb 2022 16:32:35 -0800 Subject: kfence: make test case compatible with run time set sample interval The parameter kfence_sample_interval can be set via a boot parameter or, later, from the shell, which is convenient for automated tests and KFENCE parameter tuning. However, the KFENCE test case uses only the compile-time CONFIG_KFENCE_SAMPLE_INTERVAL, so the test may not run with the interval users actually set. Export kfence_sample_interval, so that the KFENCE test case can use the run-time-set sample interval.
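As a hedged sketch of what the export enables (the helper name is hypothetical; the 100x heuristic mirrors the test's own comment): a test module can now size its timeouts from the value in effect, whether that came from CONFIG_KFENCE_SAMPLE_INTERVAL, the kfence.sample_interval= boot parameter, or a later write to /sys/module/kfence/parameters/sample_interval.

#include <linux/jiffies.h>
#include <linux/kfence.h>

/* Hypothetical test helper: deadline by which a KFENCE allocation is
 * expected, based on the sample interval in effect at run time. */
static unsigned long my_kfence_alloc_deadline(void)
{
        return jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
}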
Link: https://lkml.kernel.org/r/20220207034432.185532-1-liupeng256@huawei.com Signed-off-by: Peng Liu Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Jonathan Corbet Cc: Sumit Semwal Cc: Christian König Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kfence.h | 2 ++ mm/kfence/core.c | 3 ++- mm/kfence/kfence_test.c | 8 ++++---- 3 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 4b5e3679a72c..f49e64222628 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -17,6 +17,8 @@ #include #include +extern unsigned long kfence_sample_interval; + /* * We allocate an even number of pages, as it simplifies calculations to map * address to metadata indices; effectively, the very first page serves as an diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 5ad40e3add45..13128fa13062 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -47,7 +47,8 @@ static bool kfence_enabled __read_mostly; -static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; +unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; +EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ #ifdef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index a22b1af85577..50dbb815a2a8 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -268,13 +268,13 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. */ - timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL); + timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); /* * Especially for non-preemption kernels, ensure the allocation-gate * timer can catch up: after @resched_after, every failed allocation * attempt yields, to ensure the allocation-gate timer is scheduled. */ - resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL); + resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval); do { if (test_cache) alloc = kmem_cache_alloc(test_cache, gfp); @@ -608,7 +608,7 @@ static void test_gfpzero(struct kunit *test) int i; /* Skip if we think it'd take too long. */ - KFENCE_TEST_REQUIRES(test, CONFIG_KFENCE_SAMPLE_INTERVAL <= 100); + KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100); setup_test_cache(test, size, 0, NULL); buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); @@ -739,7 +739,7 @@ static void test_memcache_alloc_bulk(struct kunit *test) * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. */ - timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL); + timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); do { void *objects[100]; int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects), -- cgit v1.2.3-71-gd317 From 6d240170811aad7330e6d0b3857fb0d4d9c82b56 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Mon, 7 Feb 2022 10:05:40 +0800 Subject: firmware: imx: add get resource owner api Add a resource owner management API; it can be used to check whether the M4 core is under the control of Linux.
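A hedged caller sketch (function and variable names here are illustrative, not part of the patch; how Linux learns its own partition number is out of scope):

#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/svc/rm.h>

/* Hypothetical check: the M4 resource is "under Linux" when its owning
 * partition matches the partition Linux itself runs in. */
static bool my_m4_owned_by_linux(struct imx_sc_ipc *ipc, u16 m4_rsrc, u8 linux_pt)
{
        u8 pt;

        if (imx_sc_rm_get_resource_owner(ipc, m4_rsrc, &pt))
                return false;   /* treat RPC failure as "not ours" */

        return pt == linux_pt;
}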
Signed-off-by: Peng Fan Signed-off-by: Shawn Guo --- drivers/firmware/imx/rm.c | 45 +++++++++++++++++++++++++++++++++++++ include/linux/firmware/imx/svc/rm.h | 5 +++++ 2 files changed, 50 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c index a12db6ff323b..d492b99e1c6c 100644 --- a/drivers/firmware/imx/rm.c +++ b/drivers/firmware/imx/rm.c @@ -43,3 +43,48 @@ bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) return hdr->func; } EXPORT_SYMBOL(imx_sc_rm_is_resource_owned); + +struct imx_sc_msg_rm_get_resource_owner { + struct imx_sc_rpc_msg hdr; + union { + struct { + u16 resource; + } req; + struct { + u8 val; + } resp; + } data; +} __packed __aligned(4); + +/* + * This function get @resource partition number + * + * @param[in] ipc IPC handle + * @param[in] resource resource the control is associated with + * @param[out] pt pointer to return the partition number + * + * @return Returns 0 for success and < 0 for errors. + */ +int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt) +{ + struct imx_sc_msg_rm_get_resource_owner msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + int ret; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_RM; + hdr->func = IMX_SC_RM_FUNC_GET_RESOURCE_OWNER; + hdr->size = 2; + + msg.data.req.resource = resource; + + ret = imx_scu_call_rpc(ipc, &msg, true); + if (ret) + return ret; + + if (pt) + *pt = msg.data.resp.val; + + return 0; +} +EXPORT_SYMBOL(imx_sc_rm_get_resource_owner); diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h index 456b6a59d29b..31456f897aa9 100644 --- a/include/linux/firmware/imx/svc/rm.h +++ b/include/linux/firmware/imx/svc/rm.h @@ -59,11 +59,16 @@ enum imx_sc_rm_func { #if IS_ENABLED(CONFIG_IMX_SCU) bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource); +int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt); #else static inline bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) { return true; } +static inline int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt) +{ + return -EOPNOTSUPP; +} #endif #endif -- cgit v1.2.3-71-gd317 From 3e96dcfb96e80d2f7f1edb6a1ac81b12de996fa8 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 11 Feb 2022 23:32:27 +0100 Subject: ARM: ixp4xx: Delete the Goramo MLR boardfile MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This board is replaced with the corresponding device tree. Also delete dangling platform data file only used by this boardfile and nothing else. Cc: Krzysztof Hałasa Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20220211223238.648934-3-linus.walleij@linaro.org Signed-off-by: Linus Walleij --- arch/arm/mach-ixp4xx/Kconfig | 7 - arch/arm/mach-ixp4xx/Makefile | 2 - arch/arm/mach-ixp4xx/goramo_mlr.c | 532 --------------------------- include/linux/platform_data/wan_ixp4xx_hss.h | 17 - 4 files changed, 558 deletions(-) delete mode 100644 arch/arm/mach-ixp4xx/goramo_mlr.c delete mode 100644 include/linux/platform_data/wan_ixp4xx_hss.h (limited to 'include/linux') diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig index e6b23c3ce50c..0fac12cb31a6 100644 --- a/arch/arm/mach-ixp4xx/Kconfig +++ b/arch/arm/mach-ixp4xx/Kconfig @@ -17,13 +17,6 @@ config MACH_IXP4XX_OF help Say 'Y' here to support Device Tree-based IXP4xx platforms. 
-config MACH_GORAMO_MLR - bool "GORAMO Multi Link Router" - depends on IXP4XX_PCI_LEGACY - help - Say 'Y' here if you want your kernel to support GORAMO - MultiLink router. - config ARCH_PRPMC1100 bool "PrPMC1100" help diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile index 0a92f8c40e1c..83719704a626 100644 --- a/arch/arm/mach-ixp4xx/Makefile +++ b/arch/arm/mach-ixp4xx/Makefile @@ -11,6 +11,4 @@ obj-pci-$(CONFIG_MACH_IXP4XX_OF) += ixp4xx-of.o obj-y += common.o -obj-$(CONFIG_MACH_GORAMO_MLR) += goramo_mlr.o - obj-$(CONFIG_PCI) += $(obj-pci-$(CONFIG_PCI)) common-pci.o diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c deleted file mode 100644 index 07b50dfcc489..000000000000 --- a/arch/arm/mach-ixp4xx/goramo_mlr.c +++ /dev/null @@ -1,532 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Goramo MultiLink router platform code - * Copyright (C) 2006-2009 Krzysztof Halasa - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "irqs.h" - -#define SLOT_ETHA 0x0B /* IDSEL = AD21 */ -#define SLOT_ETHB 0x0C /* IDSEL = AD20 */ -#define SLOT_MPCI 0x0D /* IDSEL = AD19 */ -#define SLOT_NEC 0x0E /* IDSEL = AD18 */ - -/* GPIO lines */ -#define GPIO_SCL 0 -#define GPIO_SDA 1 -#define GPIO_STR 2 -#define GPIO_IRQ_NEC 3 -#define GPIO_IRQ_ETHA 4 -#define GPIO_IRQ_ETHB 5 -#define GPIO_HSS0_DCD_N 6 -#define GPIO_HSS1_DCD_N 7 -#define GPIO_UART0_DCD 8 -#define GPIO_UART1_DCD 9 -#define GPIO_HSS0_CTS_N 10 -#define GPIO_HSS1_CTS_N 11 -#define GPIO_IRQ_MPCI 12 -#define GPIO_HSS1_RTS_N 13 -#define GPIO_HSS0_RTS_N 14 -/* GPIO15 is not connected */ - -/* Control outputs from 74HC4094 */ -#define CONTROL_HSS0_CLK_INT 0 -#define CONTROL_HSS1_CLK_INT 1 -#define CONTROL_HSS0_DTR_N 2 -#define CONTROL_HSS1_DTR_N 3 -#define CONTROL_EXT 4 -#define CONTROL_AUTO_RESET 5 -#define CONTROL_PCI_RESET_N 6 -#define CONTROL_EEPROM_WC_N 7 - -/* offsets from start of flash ROM = 0x50000000 */ -#define CFG_ETH0_ADDRESS 0x40 /* 6 bytes */ -#define CFG_ETH1_ADDRESS 0x46 /* 6 bytes */ -#define CFG_REV 0x4C /* u32 */ -#define CFG_SDRAM_SIZE 0x50 /* u32 */ -#define CFG_SDRAM_CONF 0x54 /* u32 */ -#define CFG_SDRAM_MODE 0x58 /* u32 */ -#define CFG_SDRAM_REFRESH 0x5C /* u32 */ - -#define CFG_HW_BITS 0x60 /* u32 */ -#define CFG_HW_USB_PORTS 0x00000007 /* 0 = no NEC chip, 1-5 = ports # */ -#define CFG_HW_HAS_PCI_SLOT 0x00000008 -#define CFG_HW_HAS_ETH0 0x00000010 -#define CFG_HW_HAS_ETH1 0x00000020 -#define CFG_HW_HAS_HSS0 0x00000040 -#define CFG_HW_HAS_HSS1 0x00000080 -#define CFG_HW_HAS_UART0 0x00000100 -#define CFG_HW_HAS_UART1 0x00000200 -#define CFG_HW_HAS_EEPROM 0x00000400 - -#define FLASH_CMD_READ_ARRAY 0xFF -#define FLASH_CMD_READ_ID 0x90 -#define FLASH_SER_OFF 0x102 /* 0x81 in 16-bit mode */ - -static u32 hw_bits = 0xFFFFFFFD; /* assume all hardware present */; -static u8 control_value; - -/* - * FIXME: this is reimplementing I2C bit-bangining. Move this - * over to using driver/i2c/busses/i2c-gpio.c like all other boards - * and register proper I2C device(s) on the bus for this. (See - * other IXP4xx boards for examples.) 
- */ -static void set_scl(u8 value) -{ - gpio_set_value(GPIO_SCL, !!value); - udelay(3); -} - -static void set_sda(u8 value) -{ - gpio_set_value(GPIO_SDA, !!value); - udelay(3); -} - -static void set_str(u8 value) -{ - gpio_set_value(GPIO_STR, !!value); - udelay(3); -} - -static inline void set_control(int line, int value) -{ - if (value) - control_value |= (1 << line); - else - control_value &= ~(1 << line); -} - - -static void output_control(void) -{ - int i; - - gpio_direction_output(GPIO_SCL, 1); - gpio_direction_output(GPIO_SDA, 1); - - for (i = 0; i < 8; i++) { - set_scl(0); - set_sda(control_value & (0x80 >> i)); /* MSB first */ - set_scl(1); /* active edge */ - } - - set_str(1); - set_str(0); - - set_scl(0); - set_sda(1); /* Be ready for START */ - set_scl(1); -} - - -static void (*set_carrier_cb_tab[2])(void *pdev, int carrier); - -static int hss_set_clock(int port, unsigned int clock_type) -{ - int ctrl_int = port ? CONTROL_HSS1_CLK_INT : CONTROL_HSS0_CLK_INT; - - switch (clock_type) { - case CLOCK_DEFAULT: - case CLOCK_EXT: - set_control(ctrl_int, 0); - output_control(); - return CLOCK_EXT; - - case CLOCK_INT: - set_control(ctrl_int, 1); - output_control(); - return CLOCK_INT; - - default: - return -EINVAL; - } -} - -static irqreturn_t hss_dcd_irq(int irq, void *pdev) -{ - int port = (irq == IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N)); - int i = gpio_get_value(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N); - set_carrier_cb_tab[port](pdev, !i); - return IRQ_HANDLED; -} - - -static int hss_open(int port, void *pdev, - void (*set_carrier_cb)(void *pdev, int carrier)) -{ - int i, irq; - - if (!port) - irq = IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N); - else - irq = IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N); - - i = gpio_get_value(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N); - set_carrier_cb(pdev, !i); - - set_carrier_cb_tab[!!port] = set_carrier_cb; - - if ((i = request_irq(irq, hss_dcd_irq, 0, "IXP4xx HSS", pdev)) != 0) { - printk(KERN_ERR "ixp4xx_hss: failed to request IRQ%i (%i)\n", - irq, i); - return i; - } - - set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 0); - output_control(); - gpio_set_value(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 0); - return 0; -} - -static void hss_close(int port, void *pdev) -{ - free_irq(port ? IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N) : - IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), pdev); - set_carrier_cb_tab[!!port] = NULL; /* catch bugs */ - - set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 1); - output_control(); - gpio_set_value(port ? 
GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 1); -} - - -/* Flash memory */ -static struct flash_platform_data flash_data = { - .map_name = "cfi_probe", - .width = 2, -}; - -static struct resource flash_resource = { - .flags = IORESOURCE_MEM, -}; - -static struct platform_device device_flash = { - .name = "IXP4XX-Flash", - .id = 0, - .dev = { .platform_data = &flash_data }, - .num_resources = 1, - .resource = &flash_resource, -}; - -/* IXP425 2 UART ports */ -static struct resource uart_resources[] = { - { - .start = IXP4XX_UART1_BASE_PHYS, - .end = IXP4XX_UART1_BASE_PHYS + 0x0fff, - .flags = IORESOURCE_MEM, - }, - { - .start = IXP4XX_UART2_BASE_PHYS, - .end = IXP4XX_UART2_BASE_PHYS + 0x0fff, - .flags = IORESOURCE_MEM, - } -}; - -static struct plat_serial8250_port uart_data[] = { - { - .mapbase = IXP4XX_UART1_BASE_PHYS, - .membase = (char __iomem *)IXP4XX_UART1_BASE_VIRT + - REG_OFFSET, - .irq = IRQ_IXP4XX_UART1, - .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, - .iotype = UPIO_MEM, - .regshift = 2, - .uartclk = IXP4XX_UART_XTAL, - }, - { - .mapbase = IXP4XX_UART2_BASE_PHYS, - .membase = (char __iomem *)IXP4XX_UART2_BASE_VIRT + - REG_OFFSET, - .irq = IRQ_IXP4XX_UART2, - .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, - .iotype = UPIO_MEM, - .regshift = 2, - .uartclk = IXP4XX_UART_XTAL, - }, - { }, -}; - -static struct platform_device device_uarts = { - .name = "serial8250", - .id = PLAT8250_DEV_PLATFORM, - .dev.platform_data = uart_data, - .num_resources = 2, - .resource = uart_resources, -}; - - -/* Built-in 10/100 Ethernet MAC interfaces */ -static struct resource eth_npeb_resources[] = { - { - .start = IXP4XX_EthB_BASE_PHYS, - .end = IXP4XX_EthB_BASE_PHYS + 0x0fff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct resource eth_npec_resources[] = { - { - .start = IXP4XX_EthC_BASE_PHYS, - .end = IXP4XX_EthC_BASE_PHYS + 0x0fff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct eth_plat_info eth_plat[] = { - { - .phy = 0, - .rxq = 3, - .txreadyq = 32, - }, { - .phy = 1, - .rxq = 4, - .txreadyq = 33, - } -}; - -static struct platform_device device_eth_tab[] = { - { - .name = "ixp4xx_eth", - .id = IXP4XX_ETH_NPEB, - .dev.platform_data = eth_plat, - .num_resources = ARRAY_SIZE(eth_npeb_resources), - .resource = eth_npeb_resources, - }, { - .name = "ixp4xx_eth", - .id = IXP4XX_ETH_NPEC, - .dev.platform_data = eth_plat + 1, - .num_resources = ARRAY_SIZE(eth_npec_resources), - .resource = eth_npec_resources, - } -}; - - -/* IXP425 2 synchronous serial ports */ -static struct hss_plat_info hss_plat[] = { - { - .set_clock = hss_set_clock, - .open = hss_open, - .close = hss_close, - .txreadyq = 34, - }, { - .set_clock = hss_set_clock, - .open = hss_open, - .close = hss_close, - .txreadyq = 35, - } -}; - -static struct platform_device device_hss_tab[] = { - { - .name = "ixp4xx_hss", - .id = 0, - .dev.platform_data = hss_plat, - }, { - .name = "ixp4xx_hss", - .id = 1, - .dev.platform_data = hss_plat + 1, - } -}; - - -static struct platform_device *device_tab[7] __initdata = { - &device_flash, /* index 0 */ -}; - -static inline u8 __init flash_readb(u8 __iomem *flash, u32 addr) -{ -#ifdef __ARMEB__ - return __raw_readb(flash + addr); -#else - return __raw_readb(flash + (addr ^ 3)); -#endif -} - -static inline u16 __init flash_readw(u8 __iomem *flash, u32 addr) -{ -#ifdef __ARMEB__ - return __raw_readw(flash + addr); -#else - return __raw_readw(flash + (addr ^ 2)); -#endif -} - -static void __init gmlr_init(void) -{ - u8 __iomem *flash; - int i, devices = 1; /* flash */ - - ixp4xx_sys_init(); - - if ((flash = 
ioremap(IXP4XX_EXP_BUS_BASE_PHYS, 0x80)) == NULL) - printk(KERN_ERR "goramo-mlr: unable to access system" - " configuration data\n"); - else { - system_rev = __raw_readl(flash + CFG_REV); - hw_bits = __raw_readl(flash + CFG_HW_BITS); - - for (i = 0; i < ETH_ALEN; i++) { - eth_plat[0].hwaddr[i] = - flash_readb(flash, CFG_ETH0_ADDRESS + i); - eth_plat[1].hwaddr[i] = - flash_readb(flash, CFG_ETH1_ADDRESS + i); - } - - __raw_writew(FLASH_CMD_READ_ID, flash); - system_serial_high = flash_readw(flash, FLASH_SER_OFF); - system_serial_high <<= 16; - system_serial_high |= flash_readw(flash, FLASH_SER_OFF + 2); - system_serial_low = flash_readw(flash, FLASH_SER_OFF + 4); - system_serial_low <<= 16; - system_serial_low |= flash_readw(flash, FLASH_SER_OFF + 6); - __raw_writew(FLASH_CMD_READ_ARRAY, flash); - - iounmap(flash); - } - - switch (hw_bits & (CFG_HW_HAS_UART0 | CFG_HW_HAS_UART1)) { - case CFG_HW_HAS_UART0: - memset(&uart_data[1], 0, sizeof(uart_data[1])); - device_uarts.num_resources = 1; - break; - - case CFG_HW_HAS_UART1: - device_uarts.dev.platform_data = &uart_data[1]; - device_uarts.resource = &uart_resources[1]; - device_uarts.num_resources = 1; - break; - } - if (hw_bits & (CFG_HW_HAS_UART0 | CFG_HW_HAS_UART1)) - device_tab[devices++] = &device_uarts; /* max index 1 */ - - if (hw_bits & CFG_HW_HAS_ETH0) - device_tab[devices++] = &device_eth_tab[0]; /* max index 2 */ - if (hw_bits & CFG_HW_HAS_ETH1) - device_tab[devices++] = &device_eth_tab[1]; /* max index 3 */ - - if (hw_bits & CFG_HW_HAS_HSS0) - device_tab[devices++] = &device_hss_tab[0]; /* max index 4 */ - if (hw_bits & CFG_HW_HAS_HSS1) - device_tab[devices++] = &device_hss_tab[1]; /* max index 5 */ - - hss_plat[0].timer_freq = ixp4xx_timer_freq; - hss_plat[1].timer_freq = ixp4xx_timer_freq; - - gpio_request(GPIO_SCL, "SCL/clock"); - gpio_request(GPIO_SDA, "SDA/data"); - gpio_request(GPIO_STR, "strobe"); - gpio_request(GPIO_HSS0_RTS_N, "HSS0 RTS"); - gpio_request(GPIO_HSS1_RTS_N, "HSS1 RTS"); - gpio_request(GPIO_HSS0_DCD_N, "HSS0 DCD"); - gpio_request(GPIO_HSS1_DCD_N, "HSS1 DCD"); - - gpio_direction_output(GPIO_SCL, 1); - gpio_direction_output(GPIO_SDA, 1); - gpio_direction_output(GPIO_STR, 0); - gpio_direction_output(GPIO_HSS0_RTS_N, 1); - gpio_direction_output(GPIO_HSS1_RTS_N, 1); - gpio_direction_input(GPIO_HSS0_DCD_N); - gpio_direction_input(GPIO_HSS1_DCD_N); - irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH); - irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH); - - set_control(CONTROL_HSS0_DTR_N, 1); - set_control(CONTROL_HSS1_DTR_N, 1); - set_control(CONTROL_EEPROM_WC_N, 1); - set_control(CONTROL_PCI_RESET_N, 1); - output_control(); - - msleep(1); /* Wait for PCI devices to initialize */ - - flash_resource.start = IXP4XX_EXP_BUS_BASE(0); - flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1; - - platform_add_devices(device_tab, devices); -} - - -#ifdef CONFIG_PCI -static void __init gmlr_pci_preinit(void) -{ - irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW); - irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW); - irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW); - irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW); - ixp4xx_pci_preinit(); -} - -static void __init gmlr_pci_postinit(void) -{ - if ((hw_bits & CFG_HW_USB_PORTS) >= 2 && - (hw_bits & CFG_HW_USB_PORTS) < 5) { - /* need to adjust number of USB ports on NEC chip */ - u32 value, addr = BIT(32 - SLOT_NEC) | 0xE0; - if 
(!ixp4xx_pci_read(addr, NP_CMD_CONFIGREAD, &value)) { - value &= ~7; - value |= (hw_bits & CFG_HW_USB_PORTS); - ixp4xx_pci_write(addr, NP_CMD_CONFIGWRITE, value); - } - } -} - -static int __init gmlr_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - switch(slot) { - case SLOT_ETHA: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA); - case SLOT_ETHB: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB); - case SLOT_NEC: return IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC); - default: return IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI); - } -} - -static struct hw_pci gmlr_hw_pci __initdata = { - .nr_controllers = 1, - .ops = &ixp4xx_ops, - .preinit = gmlr_pci_preinit, - .postinit = gmlr_pci_postinit, - .setup = ixp4xx_setup, - .map_irq = gmlr_map_irq, -}; - -static int __init gmlr_pci_init(void) -{ - if (machine_is_goramo_mlr() && - (hw_bits & (CFG_HW_USB_PORTS | CFG_HW_HAS_PCI_SLOT))) - pci_common_init(&gmlr_hw_pci); - return 0; -} - -subsys_initcall(gmlr_pci_init); -#endif /* CONFIG_PCI */ - - -MACHINE_START(GORAMO_MLR, "MultiLink") - /* Maintainer: Krzysztof Halasa */ - .map_io = ixp4xx_map_io, - .init_early = ixp4xx_init_early, - .init_irq = ixp4xx_init_irq, - .init_time = ixp4xx_timer_init, - .atag_offset = 0x100, - .init_machine = gmlr_init, -#if defined(CONFIG_PCI) - .dma_zone_size = SZ_64M, -#endif - .restart = ixp4xx_restart, -MACHINE_END diff --git a/include/linux/platform_data/wan_ixp4xx_hss.h b/include/linux/platform_data/wan_ixp4xx_hss.h deleted file mode 100644 index d525a0feb9e1..000000000000 --- a/include/linux/platform_data/wan_ixp4xx_hss.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __PLATFORM_DATA_WAN_IXP4XX_HSS_H -#define __PLATFORM_DATA_WAN_IXP4XX_HSS_H - -#include - -/* Information about built-in HSS (synchronous serial) interfaces */ -struct hss_plat_info { - int (*set_clock)(int port, unsigned int clock_type); - int (*open)(int port, void *pdev, - void (*set_carrier_cb)(void *pdev, int carrier)); - void (*close)(int port, void *pdev); - u8 txreadyq; - u32 timer_freq; -}; - -#endif -- cgit v1.2.3-71-gd317 From b50113cbdd1340c31e85e4cfc5f5e81ce9cbb2aa Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 11 Feb 2022 23:32:31 +0100 Subject: soc: ixp4xx: Add features from regmap helper If we want to read the CFG2 register on the expansion bus and apply the inversion and check for some hardcoded versions this helper comes in handy. 
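A hedged usage sketch (the function name is hypothetical; the syscon lookup mirrors what the follow-up NPE patch does): fetch the regmap and test one of the feature bits returned by the helper added below.

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/soc/ixp4xx/cpu.h>

/* Hypothetical probe-time check: is NPE-A present on this SoC? */
static bool my_npe_a_present(void)
{
        struct regmap *rmap;

        rmap = syscon_regmap_lookup_by_compatible("syscon");
        if (IS_ERR(rmap))
                return false;

        return !!(cpu_ixp4xx_features(rmap) & IXP4XX_FEATURE_RESET_NPEA);
}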
Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20220211223238.648934-7-linus.walleij@linaro.org Signed-off-by: Linus Walleij --- include/linux/soc/ixp4xx/cpu.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'include/linux') diff --git a/include/linux/soc/ixp4xx/cpu.h b/include/linux/soc/ixp4xx/cpu.h index 88bd8de0e803..48c2e241ac83 100644 --- a/include/linux/soc/ixp4xx/cpu.h +++ b/include/linux/soc/ixp4xx/cpu.h @@ -9,6 +9,7 @@ #define __SOC_IXP4XX_CPU_H__ #include +#include #ifdef CONFIG_ARM #include #endif @@ -23,6 +24,9 @@ #define IXP46X_PROCESSOR_ID_VALUE 0x69054200 /* including IXP455 */ #define IXP46X_PROCESSOR_ID_MASK 0xfffffff0 +/* Feature register in the expansion bus controller */ +#define IXP4XX_EXP_CNFG2 0x2c + /* "fuse" bits of IXP_EXP_CFG2 */ /* All IXP4xx CPUs */ #define IXP4XX_FEATURE_RCOMP (1 << 0) @@ -89,6 +93,22 @@ u32 ixp4xx_read_feature_bits(void); void ixp4xx_write_feature_bits(u32 value); +static inline u32 cpu_ixp4xx_features(struct regmap *rmap) +{ + u32 val; + + regmap_read(rmap, IXP4XX_EXP_CNFG2, &val); + /* For some reason this register is inverted */ + val = ~val; + if (cpu_is_ixp42x_rev_a0()) + return IXP42X_FEATURE_MASK & ~(IXP4XX_FEATURE_RCOMP | + IXP4XX_FEATURE_AES); + if (cpu_is_ixp42x()) + return val & IXP42X_FEATURE_MASK; + if (cpu_is_ixp43x()) + return val & IXP43X_FEATURE_MASK; + return val & IXP46X_FEATURE_MASK; +} #else #define cpu_is_ixp42x_rev_a0() 0 #define cpu_is_ixp42x() 0 @@ -101,6 +121,10 @@ static inline u32 ixp4xx_read_feature_bits(void) static inline void ixp4xx_write_feature_bits(u32 value) { } +static inline u32 cpu_ixp4xx_features(struct regmap *rmap) +{ + return 0; +} #endif #endif /* _ASM_ARCH_CPU_H */ -- cgit v1.2.3-71-gd317 From 8754a7e61c766fbc533c627b56ff181550dca00e Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 11 Feb 2022 23:32:32 +0100 Subject: soc: ixp4xx-npe: Access syscon regs using regmap If we access the syscon (expansion bus config registers) using the syscon regmap instead of relying on direct accessor functions, we do not need to call this static code in the machine (arch/arm/mach-ixp4xx/common.c) which makes things less dependent on custom machine-dependent code. Look up the syscon regmap and handle the error: this will make deferred probe work with relation to the syscon. Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20220211223238.648934-8-linus.walleij@linaro.org Signed-off-by: Linus Walleij --- drivers/soc/ixp4xx/Kconfig | 1 + drivers/soc/ixp4xx/ixp4xx-npe.c | 33 ++++++++++++++++++++++++--------- include/linux/soc/ixp4xx/npe.h | 2 ++ 3 files changed, 27 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig index e3eb19b85fa4..c55f0c9ae513 100644 --- a/drivers/soc/ixp4xx/Kconfig +++ b/drivers/soc/ixp4xx/Kconfig @@ -12,6 +12,7 @@ config IXP4XX_QMGR config IXP4XX_NPE tristate "IXP4xx Network Processor Engine support" select FW_LOADER + select MFD_SYSCON help This driver supports IXP4xx built-in network coprocessors and is automatically selected by Ethernet and HSS drivers. 
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c index f490c4ca51f5..613935cb6a48 100644 --- a/drivers/soc/ixp4xx/ixp4xx-npe.c +++ b/drivers/soc/ixp4xx/ixp4xx-npe.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -284,6 +285,7 @@ static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr, static int npe_reset(struct npe *npe) { + u32 reset_bit = (IXP4XX_FEATURE_RESET_NPEA << npe->id); u32 val, ctl, exec_count, ctx_reg2; int i; @@ -380,16 +382,19 @@ static int npe_reset(struct npe *npe) __raw_writel(0, &npe->regs->action_points[3]); __raw_writel(0, &npe->regs->watch_count); - val = ixp4xx_read_feature_bits(); + /* + * We need to work on cached values here because the register + * will read inverted but needs to be written non-inverted. + */ + val = cpu_ixp4xx_features(npe->rmap); /* reset the NPE */ - ixp4xx_write_feature_bits(val & - ~(IXP4XX_FEATURE_RESET_NPEA << npe->id)); + regmap_write(npe->rmap, IXP4XX_EXP_CNFG2, val & ~reset_bit); /* deassert reset */ - ixp4xx_write_feature_bits(val | - (IXP4XX_FEATURE_RESET_NPEA << npe->id)); + regmap_write(npe->rmap, IXP4XX_EXP_CNFG2, val | reset_bit); + for (i = 0; i < MAX_RETRIES; i++) { - if (ixp4xx_read_feature_bits() & - (IXP4XX_FEATURE_RESET_NPEA << npe->id)) + val = cpu_ixp4xx_features(npe->rmap); + if (val & reset_bit) break; /* NPE is back alive */ udelay(1); } @@ -683,6 +688,14 @@ static int ixp4xx_npe_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct resource *res; + struct regmap *rmap; + u32 val; + + /* This system has only one syscon, so fetch it */ + rmap = syscon_regmap_lookup_by_compatible("syscon"); + if (IS_ERR(rmap)) + return dev_err_probe(dev, PTR_ERR(rmap), + "failed to look up syscon\n"); for (i = 0; i < NPE_COUNT; i++) { struct npe *npe = &npe_tab[i]; @@ -691,8 +704,9 @@ static int ixp4xx_npe_probe(struct platform_device *pdev) if (!res) return -ENODEV; - if (!(ixp4xx_read_feature_bits() & - (IXP4XX_FEATURE_RESET_NPEA << i))) { + val = cpu_ixp4xx_features(rmap); + + if (!(val & (IXP4XX_FEATURE_RESET_NPEA << i))) { dev_info(dev, "NPE%d at %pR not available\n", i, res); continue; /* NPE already disabled or not present */ @@ -700,6 +714,7 @@ static int ixp4xx_npe_probe(struct platform_device *pdev) npe->regs = devm_ioremap_resource(dev, res); if (IS_ERR(npe->regs)) return PTR_ERR(npe->regs); + npe->rmap = rmap; if (npe_reset(npe)) { dev_info(dev, "NPE%d at %pR does not reset\n", diff --git a/include/linux/soc/ixp4xx/npe.h b/include/linux/soc/ixp4xx/npe.h index 2a91f465d456..9efeac777da1 100644 --- a/include/linux/soc/ixp4xx/npe.h +++ b/include/linux/soc/ixp4xx/npe.h @@ -3,6 +3,7 @@ #define __IXP4XX_NPE_H #include +#include extern const char *npe_names[]; @@ -17,6 +18,7 @@ struct npe_regs { struct npe { struct npe_regs __iomem *regs; + struct regmap *rmap; int id; int valid; }; -- cgit v1.2.3-71-gd317 From c8200f4e7267545a384fb86a4630f76958ab9df6 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 11 Feb 2022 23:32:33 +0100 Subject: net: ixp4xx_eth: Drop platform data support All IXP4xx platforms are converted to device tree, the platform data path is no longer used. Drop the code and custom include, confine the driver in its own file. Depend on OF and remove ifdefs around this, as we are all probing from OF now. Cc: David S. 
Miller Cc: Jakub Kicinski Cc: netdev@vger.kernel.org Signed-off-by: Linus Walleij Acked-by: Jakub Kicinski Link: https://lore.kernel.org/r/20220211223238.648934-9-linus.walleij@linaro.org Signed-off-by: Linus Walleij --- drivers/net/ethernet/xscale/Kconfig | 4 +- drivers/net/ethernet/xscale/ixp4xx_eth.c | 85 +++++++------------------------- include/linux/platform_data/eth_ixp4xx.h | 21 -------- 3 files changed, 21 insertions(+), 89 deletions(-) delete mode 100644 include/linux/platform_data/eth_ixp4xx.h (limited to 'include/linux') diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig index 0e878fa6e322..b33f64c54b0e 100644 --- a/drivers/net/ethernet/xscale/Kconfig +++ b/drivers/net/ethernet/xscale/Kconfig @@ -20,9 +20,9 @@ if NET_VENDOR_XSCALE config IXP4XX_ETH tristate "Intel IXP4xx Ethernet support" - depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR + depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR && OF select PHYLIB - select OF_MDIO if OF + select OF_MDIO select NET_PTP_CLASSIFY help Say Y here if you want to use built-in Ethernet ports diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index df77a22d1b81..d947955621ee 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -38,6 +37,11 @@ #include #include #include +#include + +#define IXP4XX_ETH_NPEA 0x00 +#define IXP4XX_ETH_NPEB 0x10 +#define IXP4XX_ETH_NPEC 0x20 #include "ixp46x_ts.h" @@ -147,6 +151,16 @@ typedef void buffer_t; #define free_buffer_irq kfree #endif +/* Information about built-in Ethernet MAC interfaces */ +struct eth_plat_info { + u8 phy; /* MII PHY ID, 0 - 31 */ + u8 rxq; /* configurable, currently 0 - 31 only */ + u8 txreadyq; + u8 hwaddr[6]; + u8 npe; /* NPE instance used by this interface */ + bool has_mdio; /* If this instance has an MDIO bus */ +}; + struct eth_regs { u32 tx_control[2], __res1[2]; /* 000 */ u32 rx_control[2], __res2[2]; /* 010 */ @@ -1366,7 +1380,6 @@ static const struct net_device_ops ixp4xx_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -#ifdef CONFIG_OF static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev) { struct device_node *np = dev->of_node; @@ -1417,12 +1430,6 @@ static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev) return plat; } -#else -static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev) -{ - return NULL; -} -#endif static int ixp4xx_eth_probe(struct platform_device *pdev) { @@ -1434,49 +1441,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) struct port *port; int err; - if (np) { - plat = ixp4xx_of_get_platdata(dev); - if (!plat) - return -ENODEV; - } else { - plat = dev_get_platdata(dev); - if (!plat) - return -ENODEV; - plat->npe = pdev->id; - switch (plat->npe) { - case IXP4XX_ETH_NPEA: - /* If the MDIO bus is not up yet, defer probe */ - break; - case IXP4XX_ETH_NPEB: - /* On all except IXP43x, NPE-B is used for the MDIO bus. - * If there is no NPE-B in the feature set, bail out, - * else we have the MDIO bus here. - */ - if (!cpu_is_ixp43x()) { - if (!(ixp4xx_read_feature_bits() & - IXP4XX_FEATURE_NPEB_ETH0)) - return -ENODEV; - /* Else register the MDIO bus on NPE-B */ - plat->has_mdio = true; - } - break; - case IXP4XX_ETH_NPEC: - /* IXP43x lacks NPE-B and uses NPE-C for the MDIO bus - * access, if there is no NPE-C, no bus, nothing works, - * so bail out. 
- */ - if (cpu_is_ixp43x()) { - if (!(ixp4xx_read_feature_bits() & - IXP4XX_FEATURE_NPEC_ETH)) - return -ENODEV; - /* Else register the MDIO bus on NPE-B */ - plat->has_mdio = true; - } - break; - default: - return -ENODEV; - } - } + plat = ixp4xx_of_get_platdata(dev); + if (!plat) + return -ENODEV; if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port)))) return -ENOMEM; @@ -1530,21 +1497,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control); udelay(50); - if (np) { - phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link); - } else { - phydev = mdiobus_get_phy(mdio_bus, plat->phy); - if (!phydev) { - err = -ENODEV; - dev_err(dev, "could not connect phydev (%d)\n", err); - goto err_free_mem; - } - err = phy_connect_direct(ndev, phydev, ixp4xx_adjust_link, - PHY_INTERFACE_MODE_MII); - if (err) - goto err_free_mem; - - } + phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link); if (!phydev) { err = -ENODEV; dev_err(dev, "no phydev\n"); diff --git a/include/linux/platform_data/eth_ixp4xx.h b/include/linux/platform_data/eth_ixp4xx.h deleted file mode 100644 index 114b0940729f..000000000000 --- a/include/linux/platform_data/eth_ixp4xx.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __PLATFORM_DATA_ETH_IXP4XX -#define __PLATFORM_DATA_ETH_IXP4XX - -#include - -#define IXP4XX_ETH_NPEA 0x00 -#define IXP4XX_ETH_NPEB 0x10 -#define IXP4XX_ETH_NPEC 0x20 - -/* Information about built-in Ethernet MAC interfaces */ -struct eth_plat_info { - u8 phy; /* MII PHY ID, 0 - 31 */ - u8 rxq; /* configurable, currently 0 - 31 only */ - u8 txreadyq; - u8 hwaddr[6]; - u8 npe; /* NPE instance used by this interface */ - bool has_mdio; /* If this instance has an MDIO bus */ -}; - -#endif -- cgit v1.2.3-71-gd317 From 3059dfa52c07a9b6d770e87dc21b68f4295239c5 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 11 Feb 2022 23:32:35 +0100 Subject: ARM: ixp4xx: Remove feature bit accessors We switched users of the accessors over to using syscon to inspect the bits, or removed the need for checking them. Delete these accessors. 
Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20220211223238.648934-11-linus.walleij@linaro.org Signed-off-by: Linus Walleij --- arch/arm/mach-ixp4xx/common.c | 21 --------------------- include/linux/soc/ixp4xx/cpu.h | 10 ---------- 2 files changed, 31 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 5192cf621f5b..4e51514ace6d 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -43,27 +43,6 @@ #include "irqs.h" -u32 ixp4xx_read_feature_bits(void) -{ - u32 val = ~__raw_readl(IXP4XX_EXP_CFG2); - - if (cpu_is_ixp42x_rev_a0()) - return IXP42X_FEATURE_MASK & ~(IXP4XX_FEATURE_RCOMP | - IXP4XX_FEATURE_AES); - if (cpu_is_ixp42x()) - return val & IXP42X_FEATURE_MASK; - if (cpu_is_ixp43x()) - return val & IXP43X_FEATURE_MASK; - return val & IXP46X_FEATURE_MASK; -} -EXPORT_SYMBOL(ixp4xx_read_feature_bits); - -void ixp4xx_write_feature_bits(u32 value) -{ - __raw_writel(~value, IXP4XX_EXP_CFG2); -} -EXPORT_SYMBOL(ixp4xx_write_feature_bits); - #define IXP4XX_TIMER_FREQ 66666000 /************************************************************************* diff --git a/include/linux/soc/ixp4xx/cpu.h b/include/linux/soc/ixp4xx/cpu.h index 48c2e241ac83..f526ac33afea 100644 --- a/include/linux/soc/ixp4xx/cpu.h +++ b/include/linux/soc/ixp4xx/cpu.h @@ -90,9 +90,6 @@ IXP43X_PROCESSOR_ID_VALUE) #define cpu_is_ixp46x() ((read_cpuid_id() & IXP46X_PROCESSOR_ID_MASK) == \ IXP46X_PROCESSOR_ID_VALUE) - -u32 ixp4xx_read_feature_bits(void); -void ixp4xx_write_feature_bits(u32 value); static inline u32 cpu_ixp4xx_features(struct regmap *rmap) { u32 val; @@ -114,13 +111,6 @@ static inline u32 cpu_ixp4xx_features(struct regmap *rmap) #define cpu_is_ixp42x() 0 #define cpu_is_ixp43x() 0 #define cpu_is_ixp46x() 0 -static inline u32 ixp4xx_read_feature_bits(void) -{ - return 0; -} -static inline void ixp4xx_write_feature_bits(u32 value) -{ -} static inline u32 cpu_ixp4xx_features(struct regmap *rmap) { return 0; -- cgit v1.2.3-71-gd317 From f5c54f77b07b278cfde4a654e111c39996ac8b5b Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 4 Feb 2022 09:30:13 +0100 Subject: cpumask: Add an x86-specific cpumask_clear_cpu() helper Add an x86-specific cpumask_clear_cpu() helper which will be used in places where the explicit KASAN instrumentation in the *_bit() helpers is unwanted. Also, always inline two more generic cpumask helpers.
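An illustrative sketch (the wrapper name is hypothetical): callers that must stay free of KASAN-instrumented bitops use the arch_ variant directly, while ordinary code keeps using cpumask_clear_cpu().

#include <linux/cpumask.h>
#include <asm/cpumask.h>        /* x86: arch_cpumask_clear_cpu() */

/* Hypothetical wrapper: clear a CPU from a mask without the KASAN
 * checks that the generic clear_bit()-based helper would emit. */
static __always_inline void my_clear_cpu_noinstr(int cpu, struct cpumask *dstp)
{
        arch_cpumask_clear_cpu(cpu, dstp);
}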
allyesconfig: text data bss dec hex filename 190553143 159425889 32076404 382055436 16c5b40c vmlinux.before 190551812 159424945 32076404 382053161 16c5ab29 vmlinux.after Signed-off-by: Borislav Petkov Acked-by: Marco Elver Link: https://lore.kernel.org/r/20220204083015.17317-2-bp@alien8.de --- arch/x86/include/asm/cpumask.h | 10 ++++++++++ include/linux/cpumask.h | 4 ++-- 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h index 3afa990d756b..c5aed9e9226c 100644 --- a/arch/x86/include/asm/cpumask.h +++ b/arch/x86/include/asm/cpumask.h @@ -20,11 +20,21 @@ static __always_inline bool arch_cpu_online(int cpu) { return arch_test_bit(cpu, cpumask_bits(cpu_online_mask)); } + +static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp) +{ + arch_clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); +} #else static __always_inline bool arch_cpu_online(int cpu) { return cpu == 0; } + +static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp) +{ + return; +} #endif #define arch_cpu_is_offline(cpu) unlikely(!arch_cpu_online(cpu)) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 6b06c698cd2a..fe29ac7cc469 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -102,7 +102,7 @@ extern atomic_t __num_online_cpus; extern cpumask_t cpus_booted_once_mask; -static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) +static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) { #ifdef CONFIG_DEBUG_PER_CPU_MAPS WARN_ON_ONCE(cpu >= bits); @@ -110,7 +110,7 @@ static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) } /* verify cpu argument to cpumask_* operators */ -static inline unsigned int cpumask_check(unsigned int cpu) +static __always_inline unsigned int cpumask_check(unsigned int cpu) { cpu_max_bits_warn(cpu, nr_cpumask_bits); return cpu; -- cgit v1.2.3-71-gd317 From 2618a0dae09ef37728dab89ff60418cbe25ae6bd Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sat, 12 Feb 2022 09:14:49 -0800 Subject: etherdevice: Adjust ether_addr* prototypes to silence -Wstringop-overread MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With GCC 12, -Wstringop-overread was warning about an implicit cast from char[6] to char[8]. However, the extra 2 bytes are always thrown away, alignment doesn't matter, and the risk of hitting the edge of unallocated memory has been accepted, so this prototype can just be converted to a regular char *.
Silences: net/core/dev.c: In function ‘bpf_prog_run_generic_xdp’: net/core/dev.c:4618:21: warning: ‘ether_addr_equal_64bits’ reading 8 bytes from a region of size 6 [-Wstringop-overread] 4618 | orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ net/core/dev.c:4618:21: note: referencing argument 1 of type ‘const u8[8]’ {aka ‘const unsigned char[8]’} net/core/dev.c:4618:21: note: referencing argument 2 of type ‘const u8[8]’ {aka ‘const unsigned char[8]’} In file included from net/core/dev.c:91: include/linux/etherdevice.h:375:20: note: in a call to function ‘ether_addr_equal_64bits’ 375 | static inline bool ether_addr_equal_64bits(const u8 addr1[6+2], | ^~~~~~~~~~~~~~~~~~~~~~~ Reported-by: Marc Kleine-Budde Tested-by: Marc Kleine-Budde Link: https://lore.kernel.org/netdev/20220212090811.uuzk6d76agw2vv73@pengutronix.de Cc: Jakub Kicinski Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- include/linux/etherdevice.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 2ad71cc90b37..92b10e67d5f8 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -134,7 +134,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr) #endif } -static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2]) +static inline bool is_multicast_ether_addr_64bits(const u8 *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 #ifdef __BIG_ENDIAN @@ -372,8 +372,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits. */ -static inline bool ether_addr_equal_64bits(const u8 addr1[6+2], - const u8 addr2[6+2]) +static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2); -- cgit v1.2.3-71-gd317 From 845301001308aab8fb7902548f6c3256d28b8c48 Mon Sep 17 00:00:00 2001 From: Daisuke Nojiri Date: Wed, 26 Jan 2022 10:04:10 -0800 Subject: power: supply: PCHG: Use MKBP for device event handling This change makes the PCHG driver receive device events through the MKBP protocol, since CrOS EC switched to delivering all peripheral charge events via MKBP. This will unify PCHG event handling on X86 and ARM.
Signed-off-by: Daisuke Nojiri Signed-off-by: Sebastian Reichel --- drivers/power/supply/cros_peripheral_charger.c | 37 +++------------ include/linux/platform_data/cros_ec_commands.h | 64 ++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c index 305f10dfc06d..9fe6d826148d 100644 --- a/drivers/power/supply/cros_peripheral_charger.c +++ b/drivers/power/supply/cros_peripheral_charger.c @@ -14,6 +14,7 @@ #include #include #include +#include #define DRV_NAME "cros-ec-pchg" #define PCHG_DIR_PREFIX "peripheral" @@ -237,46 +238,22 @@ static int cros_pchg_event(const struct charger_data *charger, return NOTIFY_OK; } -static u32 cros_get_device_event(const struct charger_data *charger) -{ - struct ec_params_device_event req; - struct ec_response_device_event rsp; - struct device *dev = charger->dev; - int ret; - - req.param = EC_DEVICE_EVENT_PARAM_GET_CURRENT_EVENTS; - ret = cros_pchg_ec_command(charger, 0, EC_CMD_DEVICE_EVENT, - &req, sizeof(req), &rsp, sizeof(rsp)); - if (ret < 0) { - dev_warn(dev, "Unable to get device events (err:%d)\n", ret); - return 0; - } - - return rsp.event_mask; -} - static int cros_ec_notify(struct notifier_block *nb, unsigned long queued_during_suspend, void *data) { - struct cros_ec_device *ec_dev = (struct cros_ec_device *)data; - u32 host_event = cros_ec_get_host_event(ec_dev); + struct cros_ec_device *ec_dev = data; struct charger_data *charger = container_of(nb, struct charger_data, notifier); - u32 device_event_mask; + u32 host_event; - if (!host_event) + if (ec_dev->event_data.event_type != EC_MKBP_EVENT_PCHG || + ec_dev->event_size != sizeof(host_event)) return NOTIFY_DONE; - if (!(host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_DEVICE))) - return NOTIFY_DONE; + host_event = get_unaligned_le32(&ec_dev->event_data.data.host_event); - /* - * todo: Retrieve device event mask in common place - * (e.g. cros_ec_proto.c). - */ - device_event_mask = cros_get_device_event(charger); - if (!(device_event_mask & EC_DEVICE_EVENT_MASK(EC_DEVICE_EVENT_WLC))) + if (!(host_event & EC_MKBP_PCHG_DEVICE_EVENT)) return NOTIFY_DONE; return cros_pchg_event(charger, host_event); diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 271bd87bff0a..95e7e5667291 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -3386,6 +3386,9 @@ enum ec_mkbp_event { /* Send an incoming CEC message to the AP */ EC_MKBP_EVENT_CEC_MESSAGE = 9, + /* Peripheral device charger event */ + EC_MKBP_EVENT_PCHG = 12, + /* Number of MKBP events */ EC_MKBP_EVENT_COUNT, }; @@ -5527,6 +5530,67 @@ enum pchg_state { [PCHG_STATE_CONNECTED] = "CONNECTED", \ } +/* + * Update firmware of peripheral chip + */ +#define EC_CMD_PCHG_UPDATE 0x0136 + +/* Port number is encoded in bit[28:31]. */ +#define EC_MKBP_PCHG_PORT_SHIFT 28 +/* Utility macro for converting MKBP event to port number. */ +#define EC_MKBP_PCHG_EVENT_TO_PORT(e) (((e) >> EC_MKBP_PCHG_PORT_SHIFT) & 0xf) +/* Utility macro for extracting event bits. 
*/ +#define EC_MKBP_PCHG_EVENT_MASK(e) ((e) \ + & GENMASK(EC_MKBP_PCHG_PORT_SHIFT-1, 0)) + +#define EC_MKBP_PCHG_UPDATE_OPENED BIT(0) +#define EC_MKBP_PCHG_WRITE_COMPLETE BIT(1) +#define EC_MKBP_PCHG_UPDATE_CLOSED BIT(2) +#define EC_MKBP_PCHG_UPDATE_ERROR BIT(3) +#define EC_MKBP_PCHG_DEVICE_EVENT BIT(4) + +enum ec_pchg_update_cmd { + /* Reset chip to normal mode. */ + EC_PCHG_UPDATE_CMD_RESET_TO_NORMAL = 0, + /* Reset and put a chip in update (a.k.a. download) mode. */ + EC_PCHG_UPDATE_CMD_OPEN, + /* Write a block of data containing FW image. */ + EC_PCHG_UPDATE_CMD_WRITE, + /* Close update session. */ + EC_PCHG_UPDATE_CMD_CLOSE, + /* End of commands */ + EC_PCHG_UPDATE_CMD_COUNT, +}; + +struct ec_params_pchg_update { + /* PCHG port number */ + uint8_t port; + /* enum ec_pchg_update_cmd */ + uint8_t cmd; + /* Padding */ + uint8_t reserved0; + uint8_t reserved1; + /* Version of new firmware */ + uint32_t version; + /* CRC32 of new firmware */ + uint32_t crc32; + /* Address in chip memory where is written to */ + uint32_t addr; + /* Size of */ + uint32_t size; + /* Partial data of new firmware */ + uint8_t data[]; +} __ec_align4; + +BUILD_ASSERT(EC_PCHG_UPDATE_CMD_COUNT + < BIT(sizeof(((struct ec_params_pchg_update *)0)->cmd)*8)); + +struct ec_response_pchg_update { + /* Block size */ + uint32_t block_size; +} __ec_align4; + + /*****************************************************************************/ /* Voltage regulator controls */ -- cgit v1.2.3-71-gd317 From f68f2ff91512c199ec24883001245912afc17873 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 20 Apr 2021 23:22:52 -0700 Subject: fortify: Detect struct member overflows in memcpy() at compile-time memcpy() is dead; long live memcpy() tl;dr: In order to eliminate a large class of common buffer overflow flaws that continue to persist in the kernel, have memcpy() (under CONFIG_FORTIFY_SOURCE) perform bounds checking of the destination struct member when they have a known size. This would have caught all of the memcpy()-related buffer write overflow flaws identified in at least the last three years. Background and analysis: While stack-based buffer overflow flaws are largely mitigated by stack canaries (and similar) features, heap-based buffer overflow flaws continue to regularly appear in the kernel. Many classes of heap buffer overflows are mitigated by FORTIFY_SOURCE when using the strcpy() family of functions, but a significant number remain exposed through the memcpy() family of functions. At its core, FORTIFY_SOURCE uses the compiler's __builtin_object_size() internal[0] to determine the available size at a target address based on the compile-time known structure layout details. It operates in two modes: outer bounds (0) and inner bounds (1). In mode 0, the size of the enclosing structure is used. In mode 1, the size of the specific field is used. For example: struct object { u16 scalar1; /* 2 bytes */ char array[6]; /* 6 bytes */ u64 scalar2; /* 8 bytes */ u32 scalar3; /* 4 bytes */ u32 scalar4; /* 4 bytes */ } instance; __builtin_object_size(instance.array, 0) == 22, since the remaining size of the enclosing structure starting from "array" is 22 bytes (6 + 8 + 4 + 4). __builtin_object_size(instance.array, 1) == 6, since the remaining size of the specific field "array" is 6 bytes. The initial implementation of FORTIFY_SOURCE used mode 0 because there were many cases of both strcpy() and memcpy() functions being used to write (or read) across multiple fields in a structure. 
For example, it would catch this, which is writing 2 bytes beyond the end of "instance": memcpy(&instance.array, data, 25); While this didn't protect against overwriting adjacent fields in a given structure, it would at least stop overflows from reaching beyond the end of the structure into neighboring memory, and provided a meaningful mitigation of a subset of buffer overflow flaws. However, many desirable targets remain within the enclosing structure (for example function pointers). As it happened, there were very few cases of strcpy() family functions intentionally writing beyond the end of a string buffer. Once all known cases were removed from the kernel, the strcpy() family was tightened[1] to use mode 1, providing greater mitigation coverage. What remains is switching memcpy() to mode 1 as well, but making the switch is much more difficult because of how frustrating it can be to find existing "normal" uses of memcpy() that expect to write (or read) across multiple fields. The root cause of the problem is that the C language lacks a common pattern to indicate the intent of an author's use of memcpy(), and is further complicated by the available compile-time and run-time mitigation behaviors. The FORTIFY_SOURCE mitigation comes in two halves: the compile-time half, when both the buffer size _and_ the length of the copy is known, and the run-time half, when only the buffer size is known. If neither size is known, there is no bounds checking possible. At compile-time when the compiler sees that a length will always exceed a known buffer size, a warning can be deterministically emitted. For the run-time half, the length is tested against the known size of the buffer, and the overflowing operation is detected. (The performance overhead for these tests is virtually zero.) It is relatively easy to find compile-time false-positives since a warning is always generated. Fixing the false positives, however, can be very time-consuming as there are hundreds of instances. While it's possible some over-read conditions could lead to kernel memory exposures, the bulk of the risk comes from the run-time flaws where the length of a write may end up being attacker-controlled and lead to an overflow. Many of the compile-time false-positives take a form similar to this: memcpy(&instance.scalar2, data, sizeof(instance.scalar2) + sizeof(instance.scalar3)); and the run-time ones are similar, but lack a constant expression for the size of the copy: memcpy(instance.array, data, length); The former is meant to cover multiple fields (though its style has been frowned upon more recently), but has been technically legal. Both lack any expressivity in the C language about the author's _intent_ in a way that a compiler can check when the length isn't known at compile time. A comment doesn't work well because what's needed is something a compiler can directly reason about. Is a given memcpy() call expected to overflow into neighbors? Is it not? By using the new struct_group() macro, this intent can be much more easily encoded. It is not as easy to find the run-time false-positives since the code path to exercise a seemingly out-of-bounds condition that is actually expected may not be trivially reachable. Tightening the restrictions to block an operation for a false positive will either potentially create a greater flaw (if a copy is truncated by the mitigation), or destabilize the kernel (e.g. with a BUG()), making things completely useless for the end user. 
As a result, tightening the memcpy() restriction (when there is a reasonable level of uncertainty of the number of false positives), needs to first WARN() with no truncation. (Though any sufficiently paranoid end-user can always opt to set the panic_on_warn=1 sysctl.) Once enough development time has passed, the mitigation can be further intensified. (Note that this patch is only the compile-time checking step, which is a prerequisite to doing run-time checking, which will come in future patches.) Given the potential frustrations of weeding out all the false positives when tightening the run-time checks, it is reasonable to wonder if these changes would actually add meaningful protection. Looking at just the last three years, there are 23 identified flaws with a CVE that mention "buffer overflow", and 11 are memcpy()-related buffer overflows. (For the remaining 12: 7 are array index overflows that would be mitigated by systems built with CONFIG_UBSAN_BOUNDS=y: CVE-2019-0145, CVE-2019-14835, CVE-2019-14896, CVE-2019-14897, CVE-2019-14901, CVE-2019-17666, CVE-2021-28952. 2 are miscalculated allocation sizes which could be mitigated with memory tagging: CVE-2019-16746, CVE-2019-2181. 1 is an iovec buffer bug maybe mitigated by memory tagging: CVE-2020-10742. 1 is a type confusion bug mitigated by stack canaries: CVE-2020-10942. 1 is a string handling logic bug with no mitigation I'm aware of: CVE-2021-28972.) At my last count on an x86_64 allmodconfig build, there are 35,294 calls to memcpy(). With callers instrumented to report all places where the buffer size is known but the length remains unknown (i.e. a run-time bounds check is added), we can count how many new run-time bounds checks are added when the destination and source arguments of memcpy() are changed to use "mode 1" bounds checking: 1,276. This means for the future run-time checking, there is a worst-case upper bounds of 3.6% false positives to fix. In addition, there were around 150 new compile-time warnings to evaluate and fix (which have now been fixed). With this instrumentation it's also possible to compare the places where the known 11 memcpy() flaw overflows manifested against the resulting list of potential new run-time bounds checks, as a measure of potential efficacy of the tightened mitigation. Much to my surprise, horror, and delight, all 11 flaws would have been detected by the newly added run-time bounds checks, making this a distinctly clear mitigation improvement: 100% coverage for known memcpy() flaws, with a possible 2 orders of magnitude gain in coverage over existing but undiscovered run-time dynamic length flaws (i.e. 1265 newly covered sites in addition to the 11 known), against only <4% of all memcpy() callers maybe gaining a false positive run-time check, with only about 150 new compile-time instances needing evaluation. 
Specifically these would have been mitigated: CVE-2020-24490 https://git.kernel.org/linus/a2ec905d1e160a33b2e210e45ad30445ef26ce0e CVE-2020-12654 https://git.kernel.org/linus/3a9b153c5591548612c3955c9600a98150c81875 CVE-2020-12653 https://git.kernel.org/linus/b70261a288ea4d2f4ac7cd04be08a9f0f2de4f4d CVE-2019-14895 https://git.kernel.org/linus/3d94a4a8373bf5f45cf5f939e88b8354dbf2311b CVE-2019-14816 https://git.kernel.org/linus/7caac62ed598a196d6ddf8d9c121e12e082cac3a CVE-2019-14815 https://git.kernel.org/linus/7caac62ed598a196d6ddf8d9c121e12e082cac3a CVE-2019-14814 https://git.kernel.org/linus/7caac62ed598a196d6ddf8d9c121e12e082cac3a CVE-2019-10126 https://git.kernel.org/linus/69ae4f6aac1578575126319d3f55550e7e440449 CVE-2019-9500 https://git.kernel.org/linus/1b5e2423164b3670e8bc9174e4762d297990deff no-CVE-yet https://git.kernel.org/linus/130f634da1af649205f4a3dd86cbe5c126b57914 no-CVE-yet https://git.kernel.org/linus/d10a87a3535cce2b890897914f5d0d83df669c63 To accelerate the review of potential run-time false positives, it's also worth noting that it is possible to partially automate checking by examining the memcpy() buffer argument to check for the destination struct member having a neighboring array member. It is reasonable to expect that the vast majority of run-time false positives would look like the already evaluated and fixed compile-time false positives, where the most common pattern is neighboring arrays. (And, FWIW, many of the compile-time fixes were actual bugs, so it is reasonable to assume we'll have similar cases of actual bugs getting fixed for run-time checks.) Implementation: Tighten the memcpy() destination buffer size checking to use the actual ("mode 1") target buffer size as the bounds check instead of their enclosing structure's ("mode 0") size. Use a common inline for memcpy() (and memmove() in a following patch), since all the tests are the same. All new cross-field memcpy() uses must use the struct_group() macro or similar to target a specific range of fields, so that FORTIFY_SOURCE can reason about the size and safety of the copy. For now, cross-member "mode 1" _read_ detection at compile-time will be limited to W=1 builds, since it is, unfortunately, very common. As the priority is solving write overflows, read overflows will be part of a future phase (and can be fixed in parallel, for anyone wanting to look at W=1 build output). For run-time, the "mode 0" size checking and mitigation is left unchanged, with "mode 1" to be added in stages. In this patch, no new run-time checks are added. Future patches will first bounds-check writes, and only perform a WARN() for now. This way any missed run-time false positives can be flushed out over the coming several development cycles, but system builders who have tested their workloads to be WARN()-free can enable the panic_on_warn=1 sysctl to immediately gain a mitigation against this class of buffer overflows. Once that is under way, run-time bounds-checking of reads can be similarly enabled. Related classes of flaws that will remain unmitigated: - memcpy() with flexible array structures, as the compiler does not currently have visibility into the size of the trailing flexible array. These can be fixed in the future by refactoring such cases to use a new set of flexible array structure helpers to perform the common serialization/deserialization code patterns doing allocation and/or copying. - memcpy() with raw pointers (e.g. 
void *, char *, etc), or otherwise having their buffer size unknown at compile time, have no good mitigation beyond memory tagging (and even that would only protect against inter-object overflow, not intra-object neighboring field overflows), or refactoring. Some kind of "fat pointer" solution is likely needed to gain proper size-of-buffer awareness. (e.g. see struct membuf) - type confusion where a higher level type's allocation size does not match the resulting cast type eventually passed to a deeper memcpy() call where the compiler cannot see the true type. In theory, greater static analysis could catch these, and the use of -Warray-bounds will help find some of these. [0] https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html [1] https://git.kernel.org/linus/6a39e62abbafd1d58d1722f40c7d26ef379c6a2f Signed-off-by: Kees Cook --- include/linux/fortify-string.h | 109 ++++++++++++++++++++++--- lib/Makefile | 3 +- lib/string_helpers.c | 6 ++ lib/test_fortify/read_overflow2_field-memcpy.c | 5 ++ lib/test_fortify/write_overflow_field-memcpy.c | 5 ++ 5 files changed, 115 insertions(+), 13 deletions(-) create mode 100644 lib/test_fortify/read_overflow2_field-memcpy.c create mode 100644 lib/test_fortify/write_overflow_field-memcpy.c (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index a6cd6815f249..f578d00403ad 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -8,7 +8,9 @@ void fortify_panic(const char *name) __noreturn __cold; void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)"); void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)"); +void __read_overflow2_field(size_t avail, size_t wanted) __compiletime_warning("detected read beyond size of field (2nd parameter); maybe use struct_group()?"); void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)"); +void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("detected write beyond size of field (1st parameter); maybe use struct_group()?"); #define __compiletime_strlen(p) \ ({ \ @@ -209,22 +211,105 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) return __underlying_memset(p, c, size); } -__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) +/* + * To make sure the compiler can enforce protection against buffer overflows, + * memcpy(), memmove(), and memset() must not be used beyond individual + * struct members. If you need to copy across multiple members, please use + * struct_group() to create a named mirror of an anonymous struct union. + * (e.g. see struct sk_buff.) Read overflow checking is currently only + * done when a write overflow is also present, or when building with W=1. 
+ * + * Mitigation coverage matrix + * Bounds checking at: + * +-------+-------+-------+-------+ + * | Compile time | Run time | + * memcpy() argument sizes: | write | read | write | read | + * dest source length +-------+-------+-------+-------+ + * memcpy(known, known, constant) | y | y | n/a | n/a | + * memcpy(known, unknown, constant) | y | n | n/a | V | + * memcpy(known, known, dynamic) | n | n | B | B | + * memcpy(known, unknown, dynamic) | n | n | B | V | + * memcpy(unknown, known, constant) | n | y | V | n/a | + * memcpy(unknown, unknown, constant) | n | n | V | V | + * memcpy(unknown, known, dynamic) | n | n | V | B | + * memcpy(unknown, unknown, dynamic) | n | n | V | V | + * +-------+-------+-------+-------+ + * + * y = perform deterministic compile-time bounds checking + * n = cannot perform deterministic compile-time bounds checking + * n/a = no run-time bounds checking needed since compile-time deterministic + * B = can perform run-time bounds checking (currently unimplemented) + * V = vulnerable to run-time overflow (will need refactoring to solve) + * + */ +__FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size, + const size_t p_size, + const size_t q_size, + const size_t p_size_field, + const size_t q_size_field, + const char *func) { - size_t p_size = __builtin_object_size(p, 0); - size_t q_size = __builtin_object_size(q, 0); - if (__builtin_constant_p(size)) { - if (p_size < size) + /* + * Length argument is a constant expression, so we + * can perform compile-time bounds checking where + * buffer sizes are known. + */ + + /* Error when size is larger than enclosing struct. */ + if (p_size > p_size_field && p_size < size) __write_overflow(); - if (q_size < size) + if (q_size > q_size_field && q_size < size) __read_overflow2(); + + /* Warn when write size argument larger than dest field. */ + if (p_size_field < size) + __write_overflow_field(p_size_field, size); + /* + * Warn for source field over-read when building with W=1 + * or when an over-write happened, so both can be fixed at + * the same time. + */ + if ((IS_ENABLED(KBUILD_EXTRA_WARN1) || p_size_field < size) && + q_size_field < size) + __read_overflow2_field(q_size_field, size); } - if (p_size < size || q_size < size) - fortify_panic(__func__); - return __underlying_memcpy(p, q, size); + /* + * At this point, length argument may not be a constant expression, + * so run-time bounds checking can be done where buffer sizes are + * known. (This is not an "else" because the above checks may only + * be compile-time warnings, and we want to still warn for run-time + * overflows.) + */ + + /* + * Always stop accesses beyond the struct that contains the + * field, when the buffer's remaining size is known. + * (The -1 test is to optimize away checks where the buffer + * lengths are unknown.) + */ + if ((p_size != (size_t)(-1) && p_size < size) || + (q_size != (size_t)(-1) && q_size < size)) + fortify_panic(func); } +#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \ + p_size_field, q_size_field, op) ({ \ + size_t __fortify_size = (size_t)(size); \ + fortify_memcpy_chk(__fortify_size, p_size, q_size, \ + p_size_field, q_size_field, #op); \ + __underlying_##op(p, q, __fortify_size); \ +}) + +/* + * __builtin_object_size() must be captured here to avoid evaluating argument + * side-effects further into the macro layers. 
+ */ +#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \ + __builtin_object_size(p, 0), __builtin_object_size(q, 0), \ + __builtin_object_size(p, 1), __builtin_object_size(q, 1), \ + memcpy) + __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -304,13 +389,14 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) return __real_kmemdup(p, size, gfp); } -/* defined after fortified strlen and memcpy to reuse them */ +/* Defined after fortified strlen to reuse it. */ __FORTIFY_INLINE char *strcpy(char *p, const char *q) { size_t p_size = __builtin_object_size(p, 1); size_t q_size = __builtin_object_size(q, 1); size_t size; + /* If neither buffer size is known, immediately give up. */ if (p_size == (size_t)-1 && q_size == (size_t)-1) return __underlying_strcpy(p, q); size = strlen(q) + 1; @@ -320,14 +406,13 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) /* Run-time check for dynamic size overflow. */ if (p_size < size) fortify_panic(__func__); - memcpy(p, q, size); + __underlying_memcpy(p, q, size); return p; } /* Don't use these outside the FORITFY_SOURCE implementation */ #undef __underlying_memchr #undef __underlying_memcmp -#undef __underlying_memcpy #undef __underlying_memmove #undef __underlying_memset #undef __underlying_strcat diff --git a/lib/Makefile b/lib/Makefile index 300f569c626b..a99ce004812a 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -374,7 +374,8 @@ TEST_FORTIFY_LOG = test_fortify.log quiet_cmd_test_fortify = TEST $@ cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \ $< $@ "$(NM)" $(CC) $(c_flags) \ - $(call cc-disable-warning,fortify-source) + $(call cc-disable-warning,fortify-source) \ + -DKBUILD_EXTRA_WARN1 targets += $(TEST_FORTIFY_LOGS) clean-files += $(TEST_FORTIFY_LOGS) diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 90f9f1b7afec..4f877e9551d5 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -968,6 +968,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, EXPORT_SYMBOL(memcpy_and_pad); #ifdef CONFIG_FORTIFY_SOURCE +/* These are placeholders for fortify compile-time warnings. 
*/ +void __read_overflow2_field(size_t avail, size_t wanted) { } +EXPORT_SYMBOL(__read_overflow2_field); +void __write_overflow_field(size_t avail, size_t wanted) { } +EXPORT_SYMBOL(__write_overflow_field); + void fortify_panic(const char *name) { pr_emerg("detected buffer overflow in %s\n", name); diff --git a/lib/test_fortify/read_overflow2_field-memcpy.c b/lib/test_fortify/read_overflow2_field-memcpy.c new file mode 100644 index 000000000000..de9569266223 --- /dev/null +++ b/lib/test_fortify/read_overflow2_field-memcpy.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define TEST \ + memcpy(large, instance.buf, sizeof(instance.buf) + 1) + +#include "test_fortify.h" diff --git a/lib/test_fortify/write_overflow_field-memcpy.c b/lib/test_fortify/write_overflow_field-memcpy.c new file mode 100644 index 000000000000..28cc81058dd3 --- /dev/null +++ b/lib/test_fortify/write_overflow_field-memcpy.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define TEST \ + memcpy(instance.buf, large, sizeof(instance.buf) + 1) + +#include "test_fortify.h" -- cgit v1.2.3-71-gd317 From 938a000e3f9bead24ea753286b3e4d2423275c9e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Jun 2021 14:48:19 -0700 Subject: fortify: Detect struct member overflows in memmove() at compile-time As done for memcpy(), also update memmove() to use the same tightened compile-time checks under CONFIG_FORTIFY_SOURCE. Signed-off-by: Kees Cook --- arch/x86/boot/compressed/misc.c | 3 ++- arch/x86/lib/memcpy_32.c | 1 + include/linux/fortify-string.h | 21 ++++----------------- lib/test_fortify/read_overflow2_field-memmove.c | 5 +++++ lib/test_fortify/write_overflow_field-memmove.c | 5 +++++ 5 files changed, 17 insertions(+), 18 deletions(-) create mode 100644 lib/test_fortify/read_overflow2_field-memmove.c create mode 100644 lib/test_fortify/write_overflow_field-memmove.c (limited to 'include/linux') diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index a4339cb2d247..1cdcaf34ee36 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -37,10 +37,11 @@ * try to define their own functions if these are not defined as macros. */ #define memzero(s, n) memset((s), 0, (n)) +#ifndef memmove #define memmove memmove - /* Functions used by the included decompressor code below. 
*/ void *memmove(void *dest, const void *src, size_t n); +#endif /* * This is set up by the setup-routine at boot-time diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c index 3a6e6cfe8c35..ef3af7ff2c8a 100644 --- a/arch/x86/lib/memcpy_32.c +++ b/arch/x86/lib/memcpy_32.c @@ -4,6 +4,7 @@ #undef memcpy #undef memset +#undef memmove __visible void *memcpy(void *to, const void *from, size_t n) { diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index f578d00403ad..098d8a322a7a 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -309,22 +309,10 @@ __FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size, __builtin_object_size(p, 0), __builtin_object_size(q, 0), \ __builtin_object_size(p, 1), __builtin_object_size(q, 1), \ memcpy) - -__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - size_t q_size = __builtin_object_size(q, 0); - - if (__builtin_constant_p(size)) { - if (p_size < size) - __write_overflow(); - if (q_size < size) - __read_overflow2(); - } - if (p_size < size || q_size < size) - fortify_panic(__func__); - return __underlying_memmove(p, q, size); -} +#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \ + __builtin_object_size(p, 0), __builtin_object_size(q, 0), \ + __builtin_object_size(p, 1), __builtin_object_size(q, 1), \ + memmove) extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); __FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) @@ -413,7 +401,6 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) /* Don't use these outside the FORITFY_SOURCE implementation */ #undef __underlying_memchr #undef __underlying_memcmp -#undef __underlying_memmove #undef __underlying_memset #undef __underlying_strcat #undef __underlying_strcpy diff --git a/lib/test_fortify/read_overflow2_field-memmove.c b/lib/test_fortify/read_overflow2_field-memmove.c new file mode 100644 index 000000000000..6cc2724c8f62 --- /dev/null +++ b/lib/test_fortify/read_overflow2_field-memmove.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define TEST \ + memmove(large, instance.buf, sizeof(instance.buf) + 1) + +#include "test_fortify.h" diff --git a/lib/test_fortify/write_overflow_field-memmove.c b/lib/test_fortify/write_overflow_field-memmove.c new file mode 100644 index 000000000000..377fcf9bb2fd --- /dev/null +++ b/lib/test_fortify/write_overflow_field-memmove.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define TEST \ + memmove(instance.buf, large, sizeof(instance.buf) + 1) + +#include "test_fortify.h" -- cgit v1.2.3-71-gd317 From 28e77cc1c0686621a4d416f599cee5ab369daa0a Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Jun 2021 14:42:23 -0700 Subject: fortify: Detect struct member overflows in memset() at compile-time As done for memcpy(), also update memset() to use the same tightened compile-time bounds checking under CONFIG_FORTIFY_SOURCE. 
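As a minimal sketch of what the tightened memset() check catches (mirroring the new write_overflow_field-memset.c test added below; names are illustrative):

	struct obj {
		char buf[8];
		unsigned long counter;
	} instance;

	/* Writes one byte past instance.buf into the neighboring
	 * member; with this patch the compiler now emits the new
	 * __write_overflow_field() warning at compile time.
	 */
	memset(instance.buf, 0x42, sizeof(instance.buf) + 1);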
Signed-off-by: Kees Cook --- include/linux/fortify-string.h | 54 ++++++++++++++++++++++---- lib/test_fortify/write_overflow_field-memset.c | 5 +++ 2 files changed, 51 insertions(+), 8 deletions(-) create mode 100644 lib/test_fortify/write_overflow_field-memset.c (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 098d8a322a7a..53123712bb3b 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -200,17 +200,56 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) return p; } -__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) +__FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size, + const size_t p_size, + const size_t p_size_field) { - size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size)) { + /* + * Length argument is a constant expression, so we + * can perform compile-time bounds checking where + * buffer sizes are known. + */ - if (__builtin_constant_p(size) && p_size < size) - __write_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __underlying_memset(p, c, size); + /* Error when size is larger than enclosing struct. */ + if (p_size > p_size_field && p_size < size) + __write_overflow(); + + /* Warn when write size is larger than dest field. */ + if (p_size_field < size) + __write_overflow_field(p_size_field, size); + } + /* + * At this point, length argument may not be a constant expression, + * so run-time bounds checking can be done where buffer sizes are + * known. (This is not an "else" because the above checks may only + * be compile-time warnings, and we want to still warn for run-time + * overflows.) + */ + + /* + * Always stop accesses beyond the struct that contains the + * field, when the buffer's remaining size is known. + * (The -1 test is to optimize away checks where the buffer + * lengths are unknown.) + */ + if (p_size != (size_t)(-1) && p_size < size) + fortify_panic("memset"); } +#define __fortify_memset_chk(p, c, size, p_size, p_size_field) ({ \ + size_t __fortify_size = (size_t)(size); \ + fortify_memset_chk(__fortify_size, p_size, p_size_field), \ + __underlying_memset(p, c, __fortify_size); \ +}) + +/* + * __builtin_object_size() must be captured here to avoid evaluating argument + * side-effects further into the macro layers. 
+ */ +#define memset(p, c, s) __fortify_memset_chk(p, c, s, \ + __builtin_object_size(p, 0), __builtin_object_size(p, 1)) + /* * To make sure the compiler can enforce protection against buffer overflows, * memcpy(), memmove(), and memset() must not be used beyond individual @@ -401,7 +440,6 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) /* Don't use these outside the FORITFY_SOURCE implementation */ #undef __underlying_memchr #undef __underlying_memcmp -#undef __underlying_memset #undef __underlying_strcat #undef __underlying_strcpy #undef __underlying_strlen diff --git a/lib/test_fortify/write_overflow_field-memset.c b/lib/test_fortify/write_overflow_field-memset.c new file mode 100644 index 000000000000..2331da26909e --- /dev/null +++ b/lib/test_fortify/write_overflow_field-memset.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define TEST \ + memset(instance.buf, 0x42, sizeof(instance.buf) + 1) + +#include "test_fortify.h" -- cgit v1.2.3-71-gd317 From f361143141362485b39eb40bb4910e3f4219180f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 8 Feb 2022 14:53:43 -0800 Subject: fortify: Replace open-coded __gnu_inline attribute Replace open-coded gnu_inline attribute with the normal kernel convention for attributes: __gnu_inline Signed-off-by: Kees Cook Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20220208225350.1331628-2-keescook@chromium.org --- include/linux/fortify-string.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 53123712bb3b..439aad24ab3b 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -2,7 +2,7 @@ #ifndef _LINUX_FORTIFY_STRING_H_ #define _LINUX_FORTIFY_STRING_H_ -#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline)) +#define __FORTIFY_INLINE extern __always_inline __gnu_inline #define __RENAME(x) __asm__(#x) void fortify_panic(const char *name) __noreturn __cold; -- cgit v1.2.3-71-gd317 From f0202b8ca48cef152d4cdf775f39be6d3b372e1e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 8 Feb 2022 14:53:44 -0800 Subject: Compiler Attributes: Add __pass_object_size for Clang In order to gain greater visibility to type information when using __builtin_object_size(), Clang has a function attribute "pass_object_size" that will make size information available for marked arguments in a function by way of implicit additional function arguments that are then wired up the __builtin_object_size(). This is needed to implement FORTIFY_SOURCE in Clang, as a workaround to Clang's __builtin_object_size() having limited visibility[1] into types across function calls (even inlines). This attribute has an additional benefit that it can be used even on non-inline functions to gain argument size information. 
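A rough sketch of the attribute's effect (illustrative only; note that in C, Clang additionally requires such functions to be marked overloadable, which is the subject of the next patch):

	static inline __attribute__((overloadable))
	size_t field_size(const char *p __attribute__((pass_object_size(1))))
	{
		/*
		 * Clang computes __builtin_object_size(arg, 1) at each
		 * call site and passes the result in as a hidden
		 * argument, so this reflects the caller's type info.
		 */
		return __builtin_object_size(p, 1);
	}

	struct s { char a[4]; char b[12]; } x;
	/* field_size(x.a) == 4, field_size(x.b) == 12 */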
[1] https://github.com/llvm/llvm-project/issues/53516 Cc: Nick Desaulniers Cc: Nathan Chancellor Cc: llvm@lists.linux.dev Reviewed-by: Miguel Ojeda Signed-off-by: Kees Cook Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20220208225350.1331628-3-keescook@chromium.org --- include/linux/compiler_attributes.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 37e260020221..d0c503772061 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -263,6 +263,20 @@ */ #define __packed __attribute__((__packed__)) +/* + * Note: the "type" argument should match any __builtin_object_size(p, type) usage. + * + * Optional: not supported by gcc. + * Optional: not supported by icc. + * + * clang: https://clang.llvm.org/docs/AttributeReference.html#pass-object-size-pass-dynamic-object-size + */ +#if __has_attribute(__pass_object_size__) +# define __pass_object_size(type) __attribute__((__pass_object_size__(type))) +#else +# define __pass_object_size(type) +#endif + /* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute */ -- cgit v1.2.3-71-gd317 From d694dbaefd6fa6bdde84b704779eab53942753a5 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 8 Feb 2022 14:53:45 -0800 Subject: Compiler Attributes: Add __overloadable for Clang In order for FORTIFY_SOURCE to use __pass_object_size on an "extern inline" function, as all the fortified string functions are, the functions must be marked as being overloadable (i.e. different prototypes due to the implicitly injected object size arguments). This allows the __pass_object_size versions to take precedence. Cc: Nathan Chancellor Cc: llvm@lists.linux.dev Reviewed-by: Miguel Ojeda Reviewed-by: Nick Desaulniers Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220208225350.1331628-4-keescook@chromium.org --- include/linux/compiler_attributes.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index d0c503772061..dcaf55f5d1ae 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -257,6 +257,18 @@ */ #define __noreturn __attribute__((__noreturn__)) +/* + * Optional: not supported by gcc. + * Optional: not supported by icc. + * + * clang: https://clang.llvm.org/docs/AttributeReference.html#overloadable + */ +#if __has_attribute(__overloadable__) +# define __overloadable __attribute__((__overloadable__)) +#else +# define __overloadable +#endif + /* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute * clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute -- cgit v1.2.3-71-gd317 From 1c7f4e5c1b6c9d9b508007c141dca77cab9434b4 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 8 Feb 2022 14:53:46 -0800 Subject: Compiler Attributes: Add __diagnose_as for Clang Clang will perform various compile-time diagnostics on uses of various functions (e.g. simple bounds-checking on strcpy(), etc). These diagnostics can be assigned to other functions (for example, new implementations of the string functions under CONFIG_FORTIFY_SOURCE) using the "diagnose_as_builtin" attribute. This allows those functions to retain their compile-time diagnostic warnings. 
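A brief sketch (hypothetical function; the trailing indexes map the function's parameters onto the builtin's destination and source arguments):

	__attribute__((diagnose_as_builtin(__builtin_strcpy, 1, 2)))
	char *fortified_strcpy(char *dst, const char *src);

	char small[4];
	fortified_strcpy(small, "definitely too long");
	/* Clang >= 14 warns here as if __builtin_strcpy() itself
	 * had been called with an overflowing constant string.
	 */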
Cc: Nathan Chancellor Cc: llvm@lists.linux.dev Reviewed-by: Miguel Ojeda Reviewed-by: Nick Desaulniers Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220208225350.1331628-5-keescook@chromium.org --- include/linux/compiler_attributes.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index dcaf55f5d1ae..445e80517cab 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -100,6 +100,19 @@ # define __copy(symbol) #endif +/* + * Optional: not supported by gcc + * Optional: only supported since clang >= 14.0 + * Optional: not supported by icc + * + * clang: https://clang.llvm.org/docs/AttributeReference.html#diagnose_as_builtin + */ +#if __has_attribute(__diagnose_as_builtin__) +# define __diagnose_as(builtin...) __attribute__((__diagnose_as_builtin__(builtin))) +#else +# define __diagnose_as(builtin...) +#endif + /* * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' * attribute warnings entirely and for good") for more information. -- cgit v1.2.3-71-gd317 From 0a2b782a00f33e7d06dc43d099fa071ae97bee77 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 8 Feb 2022 14:53:47 -0800 Subject: fortify: Make pointer arguments const In preparation for using Clang's __pass_object_size attribute, make all the pointer arguments to the fortified string functions const. Nothing was changing their values anyway, so this added requirement (needed by __pass_object_size) requires no code changes and has no impact on the binary instruction output. Signed-off-by: Kees Cook Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20220208225350.1331628-6-keescook@chromium.org --- include/linux/fortify-string.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 439aad24ab3b..f874ada4b9af 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -50,7 +50,7 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) #define __underlying_strncpy __builtin_strncpy #endif -__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) +__FORTIFY_INLINE char *strncpy(char * const p, const char *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 1); @@ -61,7 +61,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) return __underlying_strncpy(p, q, size); } -__FORTIFY_INLINE char *strcat(char *p, const char *q) +__FORTIFY_INLINE char *strcat(char * const p, const char *q) { size_t p_size = __builtin_object_size(p, 1); @@ -73,7 +73,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q) } extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); -__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) +__FORTIFY_INLINE __kernel_size_t strnlen(const char * const p, __kernel_size_t maxlen) { size_t p_size = __builtin_object_size(p, 1); size_t p_len = __compiletime_strlen(p); @@ -94,7 +94,7 @@ __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) } /* defined after fortified strnlen to reuse it. 
*/ -__FORTIFY_INLINE __kernel_size_t strlen(const char *p) +__FORTIFY_INLINE __kernel_size_t strlen(const char * const p) { __kernel_size_t ret; size_t p_size = __builtin_object_size(p, 1); @@ -110,7 +110,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) /* defined after fortified strlen to reuse it */ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); -__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) +__FORTIFY_INLINE size_t strlcpy(char * const p, const char * const q, size_t size) { size_t p_size = __builtin_object_size(p, 1); size_t q_size = __builtin_object_size(q, 1); @@ -137,7 +137,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) /* defined after fortified strnlen to reuse it */ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy); -__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size) +__FORTIFY_INLINE ssize_t strscpy(char * const p, const char * const q, size_t size) { size_t len; /* Use string size rather than possible enclosing struct size. */ @@ -183,7 +183,7 @@ __FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size) } /* defined after fortified strlen and strnlen to reuse them */ -__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) +__FORTIFY_INLINE char *strncat(char * const p, const char * const q, __kernel_size_t count) { size_t p_len, copy_len; size_t p_size = __builtin_object_size(p, 1); @@ -354,7 +354,7 @@ __FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size, memmove) extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); -__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) +__FORTIFY_INLINE void *memscan(void * const p, int c, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -365,7 +365,7 @@ __FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) return __real_memscan(p, c, size); } -__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) +__FORTIFY_INLINE int memcmp(const void * const p, const void * const q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); @@ -381,7 +381,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) return __underlying_memcmp(p, q, size); } -__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) +__FORTIFY_INLINE void *memchr(const void * const p, int c, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -393,7 +393,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) } void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); -__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size) +__FORTIFY_INLINE void *memchr_inv(const void * const p, int c, size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -405,7 +405,7 @@ __FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size) } extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); -__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) +__FORTIFY_INLINE void *kmemdup(const void * const p, size_t size, gfp_t gfp) { size_t p_size = __builtin_object_size(p, 0); @@ -417,7 +417,7 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) } /* Defined after fortified strlen to reuse it. 
*/ -__FORTIFY_INLINE char *strcpy(char *p, const char *q) +__FORTIFY_INLINE char *strcpy(char * const p, const char * const q) { size_t p_size = __builtin_object_size(p, 1); size_t q_size = __builtin_object_size(q, 1); -- cgit v1.2.3-71-gd317 From 92df138a8d663cefebc3124041253677a53c92cf Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 8 Feb 2022 14:53:48 -0800 Subject: fortify: Use __diagnose_as() for better diagnostic coverage In preparation for using Clang's __pass_object_size, add __diagnose_as() attributes to mark the functions as being the same as the indicated builtins. When __diagnose_as() is available, Clang will have a more complete ability to apply its own diagnostic analysis to callers of these functions, as if they were the builtins themselves. Without __diagnose_as, Clang's compile-time diagnostic messages won't be as precise as they could be, but at least users of older toolchains will still benefit from having fortified routines. Signed-off-by: Kees Cook Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20220208225350.1331628-7-keescook@chromium.org --- include/linux/fortify-string.h | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index f874ada4b9af..db1ad1c1c79a 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -50,7 +50,8 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) #define __underlying_strncpy __builtin_strncpy #endif -__FORTIFY_INLINE char *strncpy(char * const p, const char *q, __kernel_size_t size) +__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3) +char *strncpy(char * const p, const char *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 1); @@ -61,7 +62,8 @@ __FORTIFY_INLINE char *strncpy(char * const p, const char *q, __kernel_size_t si return __underlying_strncpy(p, q, size); } -__FORTIFY_INLINE char *strcat(char * const p, const char *q) +__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2) +char *strcat(char * const p, const char *q) { size_t p_size = __builtin_object_size(p, 1); @@ -94,7 +96,8 @@ __FORTIFY_INLINE __kernel_size_t strnlen(const char * const p, __kernel_size_t m } /* defined after fortified strnlen to reuse it.
*/ -__FORTIFY_INLINE __kernel_size_t strlen(const char * const p) +__FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1) +__kernel_size_t strlen(const char * const p) { __kernel_size_t ret; size_t p_size = __builtin_object_size(p, 1); @@ -183,7 +186,8 @@ __FORTIFY_INLINE ssize_t strscpy(char * const p, const char * const q, size_t si } /* defined after fortified strlen and strnlen to reuse them */ -__FORTIFY_INLINE char *strncat(char * const p, const char * const q, __kernel_size_t count) +__FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3) +char *strncat(char * const p, const char * const q, __kernel_size_t count) { size_t p_len, copy_len; size_t p_size = __builtin_object_size(p, 1); @@ -365,7 +369,8 @@ __FORTIFY_INLINE void *memscan(void * const p, int c, __kernel_size_t size) return __real_memscan(p, c, size); } -__FORTIFY_INLINE int memcmp(const void * const p, const void * const q, __kernel_size_t size) +__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3) +int memcmp(const void * const p, const void * const q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); @@ -381,7 +386,8 @@ __FORTIFY_INLINE int memcmp(const void * const p, const void * const q, __kernel return __underlying_memcmp(p, q, size); } -__FORTIFY_INLINE void *memchr(const void * const p, int c, __kernel_size_t size) +__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3) +void *memchr(const void * const p, int c, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -417,7 +423,8 @@ __FORTIFY_INLINE void *kmemdup(const void * const p, size_t size, gfp_t gfp) } /* Defined after fortified strlen to reuse it. */ -__FORTIFY_INLINE char *strcpy(char * const p, const char * const q) +__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2) +char *strcpy(char * const p, const char * const q) { size_t p_size = __builtin_object_size(p, 1); size_t q_size = __builtin_object_size(q, 1); -- cgit v1.2.3-71-gd317 From 67ebc3ab446230c77fe3b545a9d8a11cac1cfb6e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 8 Feb 2022 14:53:49 -0800 Subject: fortify: Make sure strlen() may still be used as a constant expression In preparation for enabling Clang FORTIFY_SOURCE support, redefine strlen() as a macro that tests for being a constant expression so that strlen() can still be used in static initializers, which is lost when adding __pass_object_size and __overloadable. An example of this usage can be seen here: https://lore.kernel.org/all/202201252321.dRmWZ8wW-lkp@intel.com/ Notably, this constant expression feature of strlen() is not available for architectures that build with -ffreestanding. This means the kernel currently does not universally expect strlen() to be used this way, but since there _are_ some build configurations that depend on it, retain the characteristic for Clang FORTIFY_SOURCE builds too. 
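For illustration, the property being preserved is usage like this (a sketch; any string the compiler can constant-fold works):

	/*
	 * Legal at file scope only because strlen("hello") remains a
	 * constant expression after the macro redefinition: the
	 * __is_constexpr() test routes it to __builtin_strlen(),
	 * which folds to 5 at compile time.
	 */
	static const size_t greeting_len = strlen("hello");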
Signed-off-by: Kees Cook Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20220208225350.1331628-8-keescook@chromium.org --- include/linux/fortify-string.h | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index db1ad1c1c79a..f77cf22e2d60 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -2,6 +2,8 @@ #ifndef _LINUX_FORTIFY_STRING_H_ #define _LINUX_FORTIFY_STRING_H_ +#include + #define __FORTIFY_INLINE extern __always_inline __gnu_inline #define __RENAME(x) __asm__(#x) @@ -95,9 +97,16 @@ __FORTIFY_INLINE __kernel_size_t strnlen(const char * const p, __kernel_size_t m return ret; } -/* defined after fortified strnlen to reuse it. */ +/* + * Defined after fortified strnlen to reuse it. However, it must still be + * possible for strlen() to be used on compile-time strings for use in + * static initializers (i.e. as a constant expression). + */ +#define strlen(p) \ + __builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \ + __builtin_strlen(p), __fortify_strlen(p)) __FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1) -__kernel_size_t strlen(const char * const p) +__kernel_size_t __fortify_strlen(const char * const p) { __kernel_size_t ret; size_t p_size = __builtin_object_size(p, 1); -- cgit v1.2.3-71-gd317 From 281d0c962752fb40866dd8d4cade68656f34bd1f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 8 Feb 2022 14:53:50 -0800 Subject: fortify: Add Clang support Enable FORTIFY_SOURCE support for Clang: Use the new __pass_object_size and __overloadable attributes so that Clang will have appropriate visibility into argument sizes such that __builtin_object_size(p, 1) will behave correctly. Additional details available here: https://github.com/llvm/llvm-project/issues/53516 https://github.com/ClangBuiltLinux/linux/issues/1401 A bug with __builtin_constant_p() of globally defined variables was fixed in Clang 13 (and backported to 12.0.1), so FORTIFY support must depend on that version or later. Additional details here: https://bugs.llvm.org/show_bug.cgi?id=41459 commit a52f8a59aef4 ("fortify: Explicitly disable Clang support") A bug with Clang's -mregparm=3 and -m32 makes some builtins unusable, so removing -ffreestanding (to gain the needed libcall optimizations with Clang) cannot be done. Without the libcall optimizations, Clang cannot provide appropriate FORTIFY coverage, so it must be disabled for CONFIG_X86_32. 
Additional details here; https://github.com/llvm/llvm-project/issues/53645 Cc: Miguel Ojeda Cc: Nick Desaulniers Cc: Nathan Chancellor Cc: George Burgess IV Cc: llvm@lists.linux.dev Signed-off-by: Kees Cook Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/r/20220208225350.1331628-9-keescook@chromium.org --- include/linux/fortify-string.h | 40 ++++++++++++++++++++++++++-------------- security/Kconfig | 5 +++-- 2 files changed, 29 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index f77cf22e2d60..295637a66c46 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -4,7 +4,7 @@ #include -#define __FORTIFY_INLINE extern __always_inline __gnu_inline +#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable #define __RENAME(x) __asm__(#x) void fortify_panic(const char *name) __noreturn __cold; @@ -52,8 +52,17 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) #define __underlying_strncpy __builtin_strncpy #endif +/* + * Clang's use of __builtin_object_size() within inlines needs hinting via + * __pass_object_size(). The preference is to only ever use type 1 (member + * size, rather than struct size), but there remain some stragglers using + * type 0 that will be converted in the future. + */ +#define POS __pass_object_size(1) +#define POS0 __pass_object_size(0) + __FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3) -char *strncpy(char * const p, const char *q, __kernel_size_t size) +char *strncpy(char * const POS p, const char *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 1); @@ -65,7 +74,7 @@ char *strncpy(char * const p, const char *q, __kernel_size_t size) } __FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2) -char *strcat(char * const p, const char *q) +char *strcat(char * const POS p, const char *q) { size_t p_size = __builtin_object_size(p, 1); @@ -77,7 +86,7 @@ char *strcat(char * const p, const char *q) } extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); -__FORTIFY_INLINE __kernel_size_t strnlen(const char * const p, __kernel_size_t maxlen) +__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen) { size_t p_size = __builtin_object_size(p, 1); size_t p_len = __compiletime_strlen(p); @@ -106,7 +115,7 @@ __FORTIFY_INLINE __kernel_size_t strnlen(const char * const p, __kernel_size_t m __builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \ __builtin_strlen(p), __fortify_strlen(p)) __FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1) -__kernel_size_t __fortify_strlen(const char * const p) +__kernel_size_t __fortify_strlen(const char * const POS p) { __kernel_size_t ret; size_t p_size = __builtin_object_size(p, 1); @@ -122,7 +131,7 @@ __kernel_size_t __fortify_strlen(const char * const p) /* defined after fortified strlen to reuse it */ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); -__FORTIFY_INLINE size_t strlcpy(char * const p, const char * const q, size_t size) +__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size) { size_t p_size = __builtin_object_size(p, 1); size_t q_size = __builtin_object_size(q, 1); @@ -149,7 +158,7 @@ __FORTIFY_INLINE size_t strlcpy(char * const p, const char * const q, size_t siz /* defined after fortified strnlen to reuse it */ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy); 
-__FORTIFY_INLINE ssize_t strscpy(char * const p, const char * const q, size_t size) +__FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, size_t size) { size_t len; /* Use string size rather than possible enclosing struct size. */ @@ -196,7 +205,7 @@ __FORTIFY_INLINE ssize_t strscpy(char * const p, const char * const q, size_t si /* defined after fortified strlen and strnlen to reuse them */ __FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3) -char *strncat(char * const p, const char * const q, __kernel_size_t count) +char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count) { size_t p_len, copy_len; size_t p_size = __builtin_object_size(p, 1); @@ -367,7 +376,7 @@ __FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size, memmove) extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); -__FORTIFY_INLINE void *memscan(void * const p, int c, __kernel_size_t size) +__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -379,7 +388,7 @@ __FORTIFY_INLINE void *memscan(void * const p, int c, __kernel_size_t size) } __FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3) -int memcmp(const void * const p, const void * const q, __kernel_size_t size) +int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); @@ -396,7 +405,7 @@ int memcmp(const void * const p, const void * const q, __kernel_size_t size) } __FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3) -void *memchr(const void * const p, int c, __kernel_size_t size) +void *memchr(const void * const POS0 p, int c, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -408,7 +417,7 @@ void *memchr(const void * const p, int c, __kernel_size_t size) } void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); -__FORTIFY_INLINE void *memchr_inv(const void * const p, int c, size_t size) +__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -420,7 +429,7 @@ __FORTIFY_INLINE void *memchr_inv(const void * const p, int c, size_t size) } extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); -__FORTIFY_INLINE void *kmemdup(const void * const p, size_t size, gfp_t gfp) +__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp) { size_t p_size = __builtin_object_size(p, 0); @@ -433,7 +442,7 @@ __FORTIFY_INLINE void *kmemdup(const void * const p, size_t size, gfp_t gfp) /* Defined after fortified strlen to reuse it. 
*/ __FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2) -char *strcpy(char * const p, const char * const q) +char *strcpy(char * const POS p, const char * const POS q) { size_t p_size = __builtin_object_size(p, 1); size_t q_size = __builtin_object_size(q, 1); @@ -462,4 +471,7 @@ char *strcpy(char * const p, const char * const q) #undef __underlying_strncat #undef __underlying_strncpy +#undef POS +#undef POS0 + #endif /* _LINUX_FORTIFY_STRING_H_ */ diff --git a/security/Kconfig b/security/Kconfig index 0b847f435beb..1d2d71cc1f36 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -177,9 +177,10 @@ config HARDENED_USERCOPY_PAGESPAN config FORTIFY_SOURCE bool "Harden common str/mem functions against buffer overflows" depends on ARCH_HAS_FORTIFY_SOURCE - # https://bugs.llvm.org/show_bug.cgi?id=50322 # https://bugs.llvm.org/show_bug.cgi?id=41459 - depends on !CC_IS_CLANG + depends on !CC_IS_CLANG || CLANG_VERSION >= 120001 + # https://github.com/llvm/llvm-project/issues/53645 + depends on !CC_IS_CLANG || !X86_32 help Detect overflows of buffers in common string and memory functions where the compiler can determine and validate the buffer sizes. -- cgit v1.2.3-71-gd317 From ddbd89deb7d32b1fbb879f48d68fda1a8ac58e8e Mon Sep 17 00:00:00 2001 From: Halil Pasic Date: Fri, 11 Feb 2022 02:12:52 +0100 Subject: swiotlb: fix info leak with DMA_FROM_DEVICE The problem I'm addressing was discovered by the LTP test covering cve-2018-1000204. A short description of what happens follows: 1) The test case issues a command code 00 (TEST UNIT READY) via the SG_IO interface with: dxfer_len == 524288, dxdfer_dir == SG_DXFER_FROM_DEV and a corresponding dxferp. The peculiar thing about this is that TUR is not reading from the device. 2) In sg_start_req() the invocation of blk_rq_map_user() effectively bounces the user-space buffer. As if the device was to transfer into it. Since commit a45b599ad808 ("scsi: sg: allocate with __GFP_ZERO in sg_build_indirect()") we make sure this first bounce buffer is allocated with GFP_ZERO. 3) For the rest of the story we keep ignoring that we have a TUR, so the device won't touch the buffer we prepare as if the we had a DMA_FROM_DEVICE type of situation. My setup uses a virtio-scsi device and the buffer allocated by SG is mapped by the function virtqueue_add_split() which uses DMA_FROM_DEVICE for the "in" sgs (here scatter-gather and not scsi generics). This mapping involves bouncing via the swiotlb (we need swiotlb to do virtio in protected guest like s390 Secure Execution, or AMD SEV). 4) When the SCSI TUR is done, we first copy back the content of the second (that is swiotlb) bounce buffer (which most likely contains some previous IO data), to the first bounce buffer, which contains all zeros. Then we copy back the content of the first bounce buffer to the user-space buffer. 5) The test case detects that the buffer, which it zero-initialized, ain't all zeros and fails. One can argue that this is an swiotlb problem, because without swiotlb we leak all zeros, and the swiotlb should be transparent in a sense that it does not affect the outcome (if all other participants are well behaved). Copying the content of the original buffer into the swiotlb buffer is the only way I can think of to make swiotlb transparent in such scenarios. So let's do just that if in doubt, but allow the driver to tell us that the whole mapped buffer is going to be overwritten, in which case we can preserve the old behavior and avoid the performance impact of the extra bounce. 
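A minimal sketch of the opt-out from a driver's point of view (variable names are illustrative):

	/*
	 * The device is guaranteed to overwrite all 'len' bytes, so
	 * the swiotlb need not preserve the buffer's prior contents
	 * and can skip the extra "bounce in" copy.
	 */
	dma_addr_t addr = dma_map_single_attrs(dev, buf, len,
					       DMA_FROM_DEVICE,
					       DMA_ATTR_OVERWRITE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;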
Signed-off-by: Halil Pasic Signed-off-by: Christoph Hellwig --- Documentation/core-api/dma-attributes.rst | 8 ++++++++ include/linux/dma-mapping.h | 8 ++++++++ kernel/dma/swiotlb.c | 3 ++- 3 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/core-api/dma-attributes.rst b/Documentation/core-api/dma-attributes.rst index 1887d92e8e92..17706dc91ec9 100644 --- a/Documentation/core-api/dma-attributes.rst +++ b/Documentation/core-api/dma-attributes.rst @@ -130,3 +130,11 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged subsystem that the buffer is fully accessible at the elevated privilege level (and ideally inaccessible or at least read-only at the lesser-privileged levels). + +DMA_ATTR_OVERWRITE +------------------ + +This is a hint to the DMA-mapping subsystem that the device is expected to +overwrite the entire mapped size, thus the caller does not require any of the +previous buffer contents to be preserved. This allows bounce-buffering +implementations to optimise DMA_FROM_DEVICE transfers. diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index dca2b1355bb1..6150d11a607e 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -61,6 +61,14 @@ */ #define DMA_ATTR_PRIVILEGED (1UL << 9) +/* + * This is a hint to the DMA-mapping subsystem that the device is expected + * to overwrite the entire mapped size, thus the caller does not require any + * of the previous buffer contents to be preserved. This allows + * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers. + */ +#define DMA_ATTR_OVERWRITE (1UL << 10) + /* * A dma_addr_t can hold any valid DMA or bus address for the platform. It can * be given to a device to use as a DMA source or target. It is specific to a diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index f1e7ea160b43..bfc56cb21705 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -628,7 +628,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); tlb_addr = slot_addr(mem->start, index) + offset; if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) + (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE || + dir == DMA_BIDIRECTIONAL)) swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); return tlb_addr; } -- cgit v1.2.3-71-gd317 From cd149eff8d2201a63c074a6d9d03e52926aa535d Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 9 Feb 2022 15:27:04 +0300 Subject: mtd: spi-nor: intel-spi: Disable write protection only if asked Currently the driver tries to disable the BIOS write protection automatically even if this is not what the user wants. For this reason modify the driver so that by default it does not touch the write protection. Only if specifically asked by the user (setting writeable=1 command line parameter) the driver tries to disable the BIOS write protection. 
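For context, the opt-in knob referenced above is the driver's existing module parameter, wired up roughly like this (a sketch; the description string is paraphrased):

	static bool writeable;
	module_param(writeable, bool, 0);
	MODULE_PARM_DESC(writeable, "Enable write access to the SPI flash chip (default=0)");

Passing writeable=1 when the driver loads is what now triggers the set_writeable() path below; without it the flash stays read-only.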
Signed-off-by: Mika Westerberg Reviewed-by: Andy Shevchenko Reviewed-by: Mauro Lima Reviewed-by: Tudor Ambarus Acked-by: Lee Jones Link: https://lore.kernel.org/r/20220209122706.42439-2-mika.westerberg@linux.intel.com Signed-off-by: Mark Brown --- drivers/mfd/lpc_ich.c | 59 ++++++++++++++++++++++--- drivers/mtd/spi-nor/controllers/intel-spi-pci.c | 29 +++++++----- drivers/mtd/spi-nor/controllers/intel-spi.c | 41 ++++++++--------- include/linux/platform_data/x86/intel-spi.h | 6 ++- 4 files changed, 96 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index f10e53187f67..9ffab9aafd81 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -63,6 +63,8 @@ #define SPIBASE_BYT 0x54 #define SPIBASE_BYT_SZ 512 #define SPIBASE_BYT_EN BIT(1) +#define BYT_BCR 0xfc +#define BYT_BCR_WPD BIT(0) #define SPIBASE_LPT 0x3800 #define SPIBASE_LPT_SZ 512 @@ -1084,12 +1086,57 @@ wdt_done: return ret; } +static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data) +{ + u32 val; + + val = readl(base + BYT_BCR); + if (!(val & BYT_BCR_WPD)) { + val |= BYT_BCR_WPD; + writel(val, base + BYT_BCR); + val = readl(base + BYT_BCR); + } + + return val & BYT_BCR_WPD; +} + +static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data) +{ + struct pci_dev *pdev = data; + u32 bcr; + + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + +static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data) +{ + unsigned int spi = PCI_DEVFN(13, 2); + struct pci_bus *bus = data; + u32 bcr; + + pci_bus_read_config_dword(bus, spi, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_bus_write_config_dword(bus, spi, BCR, bcr); + pci_bus_read_config_dword(bus, spi, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + static int lpc_ich_init_spi(struct pci_dev *dev) { struct lpc_ich_priv *priv = pci_get_drvdata(dev); struct resource *res = &intel_spi_res[0]; struct intel_spi_boardinfo *info; - u32 spi_base, rcba, bcr; + u32 spi_base, rcba; info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL); if (!info) @@ -1103,6 +1150,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev) if (spi_base & SPIBASE_BYT_EN) { res->start = spi_base & ~(SPIBASE_BYT_SZ - 1); res->end = res->start + SPIBASE_BYT_SZ - 1; + + info->set_writeable = lpc_ich_byt_set_writeable; } break; @@ -1113,8 +1162,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev) res->start = spi_base + SPIBASE_LPT; res->end = res->start + SPIBASE_LPT_SZ - 1; - pci_read_config_dword(dev, BCR, &bcr); - info->writeable = !!(bcr & BCR_WPD); + info->set_writeable = lpc_ich_lpt_set_writeable; + info->data = dev; } break; @@ -1135,8 +1184,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev) res->start = spi_base & 0xfffffff0; res->end = res->start + SPIBASE_APL_SZ - 1; - pci_bus_read_config_dword(bus, spi, BCR, &bcr); - info->writeable = !!(bcr & BCR_WPD); + info->set_writeable = lpc_ich_bxt_set_writeable; + info->data = bus; } pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c index 1bc53b8bb88a..508f7ca098ef 100644 --- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c @@ -16,12 +16,30 @@ #define BCR 0xdc #define BCR_WPD BIT(0) +static bool intel_spi_pci_set_writeable(void __iomem 
*base, void *data) +{ + struct pci_dev *pdev = data; + u32 bcr; + + /* Try to make the chip read/write */ + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + static const struct intel_spi_boardinfo bxt_info = { .type = INTEL_SPI_BXT, + .set_writeable = intel_spi_pci_set_writeable, }; static const struct intel_spi_boardinfo cnl_info = { .type = INTEL_SPI_CNL, + .set_writeable = intel_spi_pci_set_writeable, }; static int intel_spi_pci_probe(struct pci_dev *pdev, @@ -29,7 +47,6 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, { struct intel_spi_boardinfo *info; struct intel_spi *ispi; - u32 bcr; int ret; ret = pcim_enable_device(pdev); @@ -41,15 +58,7 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, if (!info) return -ENOMEM; - /* Try to make the chip read/write */ - pci_read_config_dword(pdev, BCR, &bcr); - if (!(bcr & BCR_WPD)) { - bcr |= BCR_WPD; - pci_write_config_dword(pdev, BCR, bcr); - pci_read_config_dword(pdev, BCR, &bcr); - } - info->writeable = !!(bcr & BCR_WPD); - + info->data = pdev; ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info); if (IS_ERR(ispi)) return PTR_ERR(ispi); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c index a413892ff449..f35597cbea0c 100644 --- a/drivers/mtd/spi-nor/controllers/intel-spi.c +++ b/drivers/mtd/spi-nor/controllers/intel-spi.c @@ -131,7 +131,6 @@ * @sregs: Start of software sequencer registers * @nregions: Maximum number of regions * @pr_num: Maximum number of protected range registers - * @writeable: Is the chip writeable * @locked: Is SPI setting locked * @swseq_reg: Use SW sequencer in register reads/writes * @swseq_erase: Use SW sequencer in erase operation @@ -149,7 +148,6 @@ struct intel_spi { void __iomem *sregs; size_t nregions; size_t pr_num; - bool writeable; bool locked; bool swseq_reg; bool swseq_erase; @@ -304,6 +302,14 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi) INTEL_SPI_TIMEOUT * 1000); } +static bool intel_spi_set_writeable(struct intel_spi *ispi) +{ + if (!ispi->info->set_writeable) + return false; + + return ispi->info->set_writeable(ispi->base, ispi->info->data); +} + static int intel_spi_init(struct intel_spi *ispi) { u32 opmenu0, opmenu1, lvscc, uvscc, val; @@ -316,19 +322,6 @@ static int intel_spi_init(struct intel_spi *ispi) ispi->nregions = BYT_FREG_NUM; ispi->pr_num = BYT_PR_NUM; ispi->swseq_reg = true; - - if (writeable) { - /* Disable write protection */ - val = readl(ispi->base + BYT_BCR); - if (!(val & BYT_BCR_WPD)) { - val |= BYT_BCR_WPD; - writel(val, ispi->base + BYT_BCR); - val = readl(ispi->base + BYT_BCR); - } - - ispi->writeable = !!(val & BYT_BCR_WPD); - } - break; case INTEL_SPI_LPT: @@ -358,6 +351,12 @@ static int intel_spi_init(struct intel_spi *ispi) return -EINVAL; } + /* Try to disable write protection if user asked to do so */ + if (writeable && !intel_spi_set_writeable(ispi)) { + dev_warn(ispi->dev, "can't disable chip write protection\n"); + writeable = false; + } + /* Disable #SMI generation from HW sequencer */ val = readl(ispi->base + HSFSTS_CTL); val &= ~HSFSTS_CTL_FSMIE; @@ -884,9 +883,12 @@ static void intel_spi_fill_partition(struct intel_spi *ispi, /* * If any of the regions have protection bits set, make the * whole partition read-only to be on the safe side. + * + * Also if the user did not ask the chip to be writeable + * mask the bit too. 
*/ - if (intel_spi_is_protected(ispi, base, limit)) - ispi->writeable = false; + if (!writeable || intel_spi_is_protected(ispi, base, limit)) + part->mask_flags |= MTD_WRITEABLE; end = (limit << 12) + 4096; if (end > part->size) @@ -927,7 +929,6 @@ struct intel_spi *intel_spi_probe(struct device *dev, ispi->dev = dev; ispi->info = info; - ispi->writeable = info->writeable; ret = intel_spi_init(ispi); if (ret) @@ -945,10 +946,6 @@ struct intel_spi *intel_spi_probe(struct device *dev, intel_spi_fill_partition(ispi, &part); - /* Prevent writes if not explicitly enabled */ - if (!ispi->writeable || !writeable) - ispi->nor.mtd.flags &= ~MTD_WRITEABLE; - ret = mtd_device_register(&ispi->nor.mtd, &part, 1); if (ret) return ERR_PTR(ret); diff --git a/include/linux/platform_data/x86/intel-spi.h b/include/linux/platform_data/x86/intel-spi.h index 7f53a5c6f35e..7dda3f690465 100644 --- a/include/linux/platform_data/x86/intel-spi.h +++ b/include/linux/platform_data/x86/intel-spi.h @@ -19,11 +19,13 @@ enum intel_spi_type { /** * struct intel_spi_boardinfo - Board specific data for Intel SPI driver * @type: Type which this controller is compatible with - * @writeable: The chip is writeable + * @set_writeable: Try to make the chip writeable (optional) + * @data: Data to be passed to @set_writeable can be %NULL */ struct intel_spi_boardinfo { enum intel_spi_type type; - bool writeable; + bool (*set_writeable)(void __iomem *base, void *data); + void *data; }; #endif /* INTEL_SPI_PDATA_H */ -- cgit v1.2.3-71-gd317 From e23e5a05d1fd9479586c40ffbcc056b3e34ef816 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 9 Feb 2022 15:27:05 +0300 Subject: mtd: spi-nor: intel-spi: Convert to SPI MEM The preferred way to implement SPI-NOR controller drivers is through the SPI subsystem utilizing the SPI MEM core functions. This converts the Intel SPI flash controller driver over to SPI MEM by moving the driver from the SPI-NOR subsystem to the SPI subsystem and, in one go, making it use the SPI MEM functions. The driver name will be changed from intel-spi to spi-intel to match the convention used in the SPI subsystem.
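The conversion centers on describing each supported operation as a template plus a handler: the patch adds a struct intel_spi_mem_op pairing a spi_mem_op template with an exec_op() callback, and incoming operations are matched against a table of these before being executed. Below is a minimal user-space sketch of that op-table dispatch; the cut-down mem_op stand-in for spi_mem_op and the opcode-only matching rule are simplifications for illustration, not the driver's actual matching logic.

#include <stddef.h>
#include <stdio.h>

struct mem_op {                 /* cut-down stand-in for spi_mem_op */
        unsigned char opcode;
        size_t nbytes;
};

struct intel_mem_op {           /* mirrors the shape of intel_spi_mem_op */
        struct mem_op mem_op;   /* template describing a supported op */
        int (*exec_op)(const struct intel_mem_op *iop,
                       const struct mem_op *op);
};

static int exec_rdid(const struct intel_mem_op *iop, const struct mem_op *op)
{
        (void)iop;
        printf("RDID, %zu data bytes\n", op->nbytes);
        return 0;
}

static const struct intel_mem_op mem_ops[] = {
        { .mem_op = { .opcode = 0x9f }, .exec_op = exec_rdid }, /* Read JEDEC ID */
        { .exec_op = NULL },                                    /* sentinel */
};

/* Find a template whose opcode matches, then run its handler */
static int exec_mem_op(const struct mem_op *op)
{
        const struct intel_mem_op *iop;

        for (iop = mem_ops; iop->exec_op; iop++)
                if (iop->mem_op.opcode == op->opcode)
                        return iop->exec_op(iop, op);
        return -1;              /* unsupported operation */
}

int main(void)
{
        const struct mem_op op = { .opcode = 0x9f, .nbytes = 3 };

        return exec_mem_op(&op);
}

The same table-driven shape appears in the new spi-intel.c below, where the table also carries a replacement_op for operations the controller must rewrite before executing.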
Signed-off-by: Mika Westerberg Reviewed-by: Andy Shevchenko Reviewed-by: Mauro Lima Reviewed-by: Boris Brezillon Acked-by: Lee Jones Acked-by: Pratyush Yadav Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/20220209122706.42439-3-mika.westerberg@linux.intel.com Signed-off-by: Mark Brown --- drivers/mtd/spi-nor/controllers/Kconfig | 36 - drivers/mtd/spi-nor/controllers/Makefile | 3 - drivers/mtd/spi-nor/controllers/intel-spi-pci.c | 108 -- .../mtd/spi-nor/controllers/intel-spi-platform.c | 54 - drivers/mtd/spi-nor/controllers/intel-spi.c | 965 --------------- drivers/mtd/spi-nor/controllers/intel-spi.h | 21 - drivers/spi/Kconfig | 39 + drivers/spi/Makefile | 3 + drivers/spi/spi-intel-pci.c | 94 ++ drivers/spi/spi-intel-platform.c | 39 + drivers/spi/spi-intel.c | 1250 ++++++++++++++++++++ drivers/spi/spi-intel.h | 19 + include/linux/mfd/lpc_ich.h | 2 +- include/linux/platform_data/x86/intel-spi.h | 31 - include/linux/platform_data/x86/spi-intel.h | 31 + 15 files changed, 1476 insertions(+), 1219 deletions(-) delete mode 100644 drivers/mtd/spi-nor/controllers/intel-spi-pci.c delete mode 100644 drivers/mtd/spi-nor/controllers/intel-spi-platform.c delete mode 100644 drivers/mtd/spi-nor/controllers/intel-spi.c delete mode 100644 drivers/mtd/spi-nor/controllers/intel-spi.h create mode 100644 drivers/spi/spi-intel-pci.c create mode 100644 drivers/spi/spi-intel-platform.c create mode 100644 drivers/spi/spi-intel.c create mode 100644 drivers/spi/spi-intel.h delete mode 100644 include/linux/platform_data/x86/intel-spi.h create mode 100644 include/linux/platform_data/x86/spi-intel.h (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig index 5c0e0ec2e6d1..50f4f3484d42 100644 --- a/drivers/mtd/spi-nor/controllers/Kconfig +++ b/drivers/mtd/spi-nor/controllers/Kconfig @@ -26,39 +26,3 @@ config SPI_NXP_SPIFI SPIFI is a specialized controller for connecting serial SPI Flash. Enable this option if you have a device with a SPIFI controller and want to access the Flash as a mtd device. - -config SPI_INTEL_SPI - tristate - -config SPI_INTEL_SPI_PCI - tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)" - depends on X86 && PCI - select SPI_INTEL_SPI - help - This enables PCI support for the Intel PCH/PCU SPI controller in - master mode. This controller is present in modern Intel hardware - and is used to hold BIOS and other persistent settings. Using - this driver it is possible to upgrade BIOS directly from Linux. - - Say N here unless you know what you are doing. Overwriting the - SPI flash may render the system unbootable. - - To compile this driver as a module, choose M here: the module - will be called intel-spi-pci. - -config SPI_INTEL_SPI_PLATFORM - tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)" - depends on X86 - select SPI_INTEL_SPI - help - This enables platform support for the Intel PCH/PCU SPI - controller in master mode. This controller is present in modern - Intel hardware and is used to hold BIOS and other persistent - settings. Using this driver it is possible to upgrade BIOS - directly from Linux. - - Say N here unless you know what you are doing. Overwriting the - SPI flash may render the system unbootable. - - To compile this driver as a module, choose M here: the module - will be called intel-spi-platform. 
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile index e7abba491d98..6e2a1dc68466 100644 --- a/drivers/mtd/spi-nor/controllers/Makefile +++ b/drivers/mtd/spi-nor/controllers/Makefile @@ -2,6 +2,3 @@ obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o -obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o -obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o -obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c deleted file mode 100644 index 508f7ca098ef..000000000000 --- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c +++ /dev/null @@ -1,108 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel PCH/PCU SPI flash PCI driver. - * - * Copyright (C) 2016, Intel Corporation - * Author: Mika Westerberg - */ - -#include -#include -#include -#include - -#include "intel-spi.h" - -#define BCR 0xdc -#define BCR_WPD BIT(0) - -static bool intel_spi_pci_set_writeable(void __iomem *base, void *data) -{ - struct pci_dev *pdev = data; - u32 bcr; - - /* Try to make the chip read/write */ - pci_read_config_dword(pdev, BCR, &bcr); - if (!(bcr & BCR_WPD)) { - bcr |= BCR_WPD; - pci_write_config_dword(pdev, BCR, bcr); - pci_read_config_dword(pdev, BCR, &bcr); - } - - return bcr & BCR_WPD; -} - -static const struct intel_spi_boardinfo bxt_info = { - .type = INTEL_SPI_BXT, - .set_writeable = intel_spi_pci_set_writeable, -}; - -static const struct intel_spi_boardinfo cnl_info = { - .type = INTEL_SPI_CNL, - .set_writeable = intel_spi_pci_set_writeable, -}; - -static int intel_spi_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - struct intel_spi_boardinfo *info; - struct intel_spi *ispi; - int ret; - - ret = pcim_enable_device(pdev); - if (ret) - return ret; - - info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info), - GFP_KERNEL); - if (!info) - return -ENOMEM; - - info->data = pdev; - ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info); - if (IS_ERR(ispi)) - return PTR_ERR(ispi); - - pci_set_drvdata(pdev, ispi); - return 0; -} - -static void intel_spi_pci_remove(struct pci_dev *pdev) -{ - intel_spi_remove(pci_get_drvdata(pdev)); -} - -static const struct pci_device_id intel_spi_pci_ids[] = { - { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info }, - { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info }, - { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info }, - { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, - { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, - { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, - { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info }, - { }, -}; -MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); - -static struct pci_driver intel_spi_pci_driver = { - .name = "intel-spi", - 
.id_table = intel_spi_pci_ids, - .probe = intel_spi_pci_probe, - .remove = intel_spi_pci_remove, -}; - -module_pci_driver(intel_spi_pci_driver); - -MODULE_DESCRIPTION("Intel PCH/PCU SPI flash PCI driver"); -MODULE_AUTHOR("Mika Westerberg "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c deleted file mode 100644 index f80f1086f928..000000000000 --- a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c +++ /dev/null @@ -1,54 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel PCH/PCU SPI flash platform driver. - * - * Copyright (C) 2016, Intel Corporation - * Author: Mika Westerberg - */ - -#include -#include -#include - -#include "intel-spi.h" - -static int intel_spi_platform_probe(struct platform_device *pdev) -{ - struct intel_spi_boardinfo *info; - struct intel_spi *ispi; - struct resource *mem; - - info = dev_get_platdata(&pdev->dev); - if (!info) - return -EINVAL; - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ispi = intel_spi_probe(&pdev->dev, mem, info); - if (IS_ERR(ispi)) - return PTR_ERR(ispi); - - platform_set_drvdata(pdev, ispi); - return 0; -} - -static int intel_spi_platform_remove(struct platform_device *pdev) -{ - struct intel_spi *ispi = platform_get_drvdata(pdev); - - return intel_spi_remove(ispi); -} - -static struct platform_driver intel_spi_platform_driver = { - .probe = intel_spi_platform_probe, - .remove = intel_spi_platform_remove, - .driver = { - .name = "intel-spi", - }, -}; - -module_platform_driver(intel_spi_platform_driver); - -MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver"); -MODULE_AUTHOR("Mika Westerberg "); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:intel-spi"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c deleted file mode 100644 index f35597cbea0c..000000000000 --- a/drivers/mtd/spi-nor/controllers/intel-spi.c +++ /dev/null @@ -1,965 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel PCH/PCU SPI flash driver. 
- * - * Copyright (C) 2016, Intel Corporation - * Author: Mika Westerberg - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "intel-spi.h" - -/* Offsets are from @ispi->base */ -#define BFPREG 0x00 - -#define HSFSTS_CTL 0x04 -#define HSFSTS_CTL_FSMIE BIT(31) -#define HSFSTS_CTL_FDBC_SHIFT 24 -#define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT) - -#define HSFSTS_CTL_FCYCLE_SHIFT 17 -#define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT) -/* HW sequencer opcodes */ -#define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT) -#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT) -#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT) -#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT) -#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT) -#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT) -#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT) - -#define HSFSTS_CTL_FGO BIT(16) -#define HSFSTS_CTL_FLOCKDN BIT(15) -#define HSFSTS_CTL_FDV BIT(14) -#define HSFSTS_CTL_SCIP BIT(5) -#define HSFSTS_CTL_AEL BIT(2) -#define HSFSTS_CTL_FCERR BIT(1) -#define HSFSTS_CTL_FDONE BIT(0) - -#define FADDR 0x08 -#define DLOCK 0x0c -#define FDATA(n) (0x10 + ((n) * 4)) - -#define FRACC 0x50 - -#define FREG(n) (0x54 + ((n) * 4)) -#define FREG_BASE_MASK 0x3fff -#define FREG_LIMIT_SHIFT 16 -#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT) - -/* Offset is from @ispi->pregs */ -#define PR(n) ((n) * 4) -#define PR_WPE BIT(31) -#define PR_LIMIT_SHIFT 16 -#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) -#define PR_RPE BIT(15) -#define PR_BASE_MASK 0x3fff - -/* Offsets are from @ispi->sregs */ -#define SSFSTS_CTL 0x00 -#define SSFSTS_CTL_FSMIE BIT(23) -#define SSFSTS_CTL_DS BIT(22) -#define SSFSTS_CTL_DBC_SHIFT 16 -#define SSFSTS_CTL_SPOP BIT(11) -#define SSFSTS_CTL_ACS BIT(10) -#define SSFSTS_CTL_SCGO BIT(9) -#define SSFSTS_CTL_COP_SHIFT 12 -#define SSFSTS_CTL_FRS BIT(7) -#define SSFSTS_CTL_DOFRS BIT(6) -#define SSFSTS_CTL_AEL BIT(4) -#define SSFSTS_CTL_FCERR BIT(3) -#define SSFSTS_CTL_FDONE BIT(2) -#define SSFSTS_CTL_SCIP BIT(0) - -#define PREOP_OPTYPE 0x04 -#define OPMENU0 0x08 -#define OPMENU1 0x0c - -#define OPTYPE_READ_NO_ADDR 0 -#define OPTYPE_WRITE_NO_ADDR 1 -#define OPTYPE_READ_WITH_ADDR 2 -#define OPTYPE_WRITE_WITH_ADDR 3 - -/* CPU specifics */ -#define BYT_PR 0x74 -#define BYT_SSFSTS_CTL 0x90 -#define BYT_BCR 0xfc -#define BYT_BCR_WPD BIT(0) -#define BYT_FREG_NUM 5 -#define BYT_PR_NUM 5 - -#define LPT_PR 0x74 -#define LPT_SSFSTS_CTL 0x90 -#define LPT_FREG_NUM 5 -#define LPT_PR_NUM 5 - -#define BXT_PR 0x84 -#define BXT_SSFSTS_CTL 0xa0 -#define BXT_FREG_NUM 12 -#define BXT_PR_NUM 6 - -#define CNL_PR 0x84 -#define CNL_FREG_NUM 6 -#define CNL_PR_NUM 5 - -#define LVSCC 0xc4 -#define UVSCC 0xc8 -#define ERASE_OPCODE_SHIFT 8 -#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) -#define ERASE_64K_OPCODE_SHIFT 16 -#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) - -#define INTEL_SPI_TIMEOUT 5000 /* ms */ -#define INTEL_SPI_FIFO_SZ 64 - -/** - * struct intel_spi - Driver private data - * @dev: Device pointer - * @info: Pointer to board specific info - * @nor: SPI NOR layer structure - * @base: Beginning of MMIO space - * @pregs: Start of protection registers - * @sregs: Start of software sequencer registers - * @nregions: Maximum number of regions - * @pr_num: Maximum number of protected range registers - * @locked: Is SPI setting locked - * 
@swseq_reg: Use SW sequencer in register reads/writes - * @swseq_erase: Use SW sequencer in erase operation - * @erase_64k: 64k erase supported - * @atomic_preopcode: Holds preopcode when atomic sequence is requested - * @opcodes: Opcodes which are supported. This are programmed by BIOS - * before it locks down the controller. - */ -struct intel_spi { - struct device *dev; - const struct intel_spi_boardinfo *info; - struct spi_nor nor; - void __iomem *base; - void __iomem *pregs; - void __iomem *sregs; - size_t nregions; - size_t pr_num; - bool locked; - bool swseq_reg; - bool swseq_erase; - bool erase_64k; - u8 atomic_preopcode; - u8 opcodes[8]; -}; - -static bool writeable; -module_param(writeable, bool, 0); -MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)"); - -static void intel_spi_dump_regs(struct intel_spi *ispi) -{ - u32 value; - int i; - - dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG)); - - value = readl(ispi->base + HSFSTS_CTL); - dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value); - if (value & HSFSTS_CTL_FLOCKDN) - dev_dbg(ispi->dev, "-> Locked\n"); - - dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR)); - dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK)); - - for (i = 0; i < 16; i++) - dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n", - i, readl(ispi->base + FDATA(i))); - - dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC)); - - for (i = 0; i < ispi->nregions; i++) - dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, - readl(ispi->base + FREG(i))); - for (i = 0; i < ispi->pr_num; i++) - dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, - readl(ispi->pregs + PR(i))); - - if (ispi->sregs) { - value = readl(ispi->sregs + SSFSTS_CTL); - dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value); - dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n", - readl(ispi->sregs + PREOP_OPTYPE)); - dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", - readl(ispi->sregs + OPMENU0)); - dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", - readl(ispi->sregs + OPMENU1)); - } - - if (ispi->info->type == INTEL_SPI_BYT) - dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR)); - - dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC)); - dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC)); - - dev_dbg(ispi->dev, "Protected regions:\n"); - for (i = 0; i < ispi->pr_num; i++) { - u32 base, limit; - - value = readl(ispi->pregs + PR(i)); - if (!(value & (PR_WPE | PR_RPE))) - continue; - - limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; - base = value & PR_BASE_MASK; - - dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n", - i, base << 12, (limit << 12) | 0xfff, - value & PR_WPE ? 'W' : '.', - value & PR_RPE ? 'R' : '.'); - } - - dev_dbg(ispi->dev, "Flash regions:\n"); - for (i = 0; i < ispi->nregions; i++) { - u32 region, base, limit; - - region = readl(ispi->base + FREG(i)); - base = region & FREG_BASE_MASK; - limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; - - if (base >= limit || (i > 0 && limit == 0)) - dev_dbg(ispi->dev, " %02d disabled\n", i); - else - dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n", - i, base << 12, (limit << 12) | 0xfff); - } - - dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", - ispi->swseq_reg ? 'S' : 'H'); - dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", - ispi->swseq_erase ? 
'S' : 'H'); -} - -/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ -static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size) -{ - size_t bytes; - int i = 0; - - if (size > INTEL_SPI_FIFO_SZ) - return -EINVAL; - - while (size > 0) { - bytes = min_t(size_t, size, 4); - memcpy_fromio(buf, ispi->base + FDATA(i), bytes); - size -= bytes; - buf += bytes; - i++; - } - - return 0; -} - -/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */ -static int intel_spi_write_block(struct intel_spi *ispi, const void *buf, - size_t size) -{ - size_t bytes; - int i = 0; - - if (size > INTEL_SPI_FIFO_SZ) - return -EINVAL; - - while (size > 0) { - bytes = min_t(size_t, size, 4); - memcpy_toio(ispi->base + FDATA(i), buf, bytes); - size -= bytes; - buf += bytes; - i++; - } - - return 0; -} - -static int intel_spi_wait_hw_busy(struct intel_spi *ispi) -{ - u32 val; - - return readl_poll_timeout(ispi->base + HSFSTS_CTL, val, - !(val & HSFSTS_CTL_SCIP), 0, - INTEL_SPI_TIMEOUT * 1000); -} - -static int intel_spi_wait_sw_busy(struct intel_spi *ispi) -{ - u32 val; - - return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val, - !(val & SSFSTS_CTL_SCIP), 0, - INTEL_SPI_TIMEOUT * 1000); -} - -static bool intel_spi_set_writeable(struct intel_spi *ispi) -{ - if (!ispi->info->set_writeable) - return false; - - return ispi->info->set_writeable(ispi->base, ispi->info->data); -} - -static int intel_spi_init(struct intel_spi *ispi) -{ - u32 opmenu0, opmenu1, lvscc, uvscc, val; - int i; - - switch (ispi->info->type) { - case INTEL_SPI_BYT: - ispi->sregs = ispi->base + BYT_SSFSTS_CTL; - ispi->pregs = ispi->base + BYT_PR; - ispi->nregions = BYT_FREG_NUM; - ispi->pr_num = BYT_PR_NUM; - ispi->swseq_reg = true; - break; - - case INTEL_SPI_LPT: - ispi->sregs = ispi->base + LPT_SSFSTS_CTL; - ispi->pregs = ispi->base + LPT_PR; - ispi->nregions = LPT_FREG_NUM; - ispi->pr_num = LPT_PR_NUM; - ispi->swseq_reg = true; - break; - - case INTEL_SPI_BXT: - ispi->sregs = ispi->base + BXT_SSFSTS_CTL; - ispi->pregs = ispi->base + BXT_PR; - ispi->nregions = BXT_FREG_NUM; - ispi->pr_num = BXT_PR_NUM; - ispi->erase_64k = true; - break; - - case INTEL_SPI_CNL: - ispi->sregs = NULL; - ispi->pregs = ispi->base + CNL_PR; - ispi->nregions = CNL_FREG_NUM; - ispi->pr_num = CNL_PR_NUM; - break; - - default: - return -EINVAL; - } - - /* Try to disable write protection if user asked to do so */ - if (writeable && !intel_spi_set_writeable(ispi)) { - dev_warn(ispi->dev, "can't disable chip write protection\n"); - writeable = false; - } - - /* Disable #SMI generation from HW sequencer */ - val = readl(ispi->base + HSFSTS_CTL); - val &= ~HSFSTS_CTL_FSMIE; - writel(val, ispi->base + HSFSTS_CTL); - - /* - * Determine whether erase operation should use HW or SW sequencer. - * - * The HW sequencer has a predefined list of opcodes, with only the - * erase opcode being programmable in LVSCC and UVSCC registers. - * If these registers don't contain a valid erase opcode, erase - * cannot be done using HW sequencer. 
- */ - lvscc = readl(ispi->base + LVSCC); - uvscc = readl(ispi->base + UVSCC); - if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) - ispi->swseq_erase = true; - /* SPI controller on Intel BXT supports 64K erase opcode */ - if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) - if (!(lvscc & ERASE_64K_OPCODE_MASK) || - !(uvscc & ERASE_64K_OPCODE_MASK)) - ispi->erase_64k = false; - - if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) { - dev_err(ispi->dev, "software sequencer not supported, but required\n"); - return -EINVAL; - } - - /* - * Some controllers can only do basic operations using hardware - * sequencer. All other operations are supposed to be carried out - * using software sequencer. - */ - if (ispi->swseq_reg) { - /* Disable #SMI generation from SW sequencer */ - val = readl(ispi->sregs + SSFSTS_CTL); - val &= ~SSFSTS_CTL_FSMIE; - writel(val, ispi->sregs + SSFSTS_CTL); - } - - /* Check controller's lock status */ - val = readl(ispi->base + HSFSTS_CTL); - ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); - - if (ispi->locked && ispi->sregs) { - /* - * BIOS programs allowed opcodes and then locks down the - * register. So read back what opcodes it decided to support. - * That's the set we are going to support as well. - */ - opmenu0 = readl(ispi->sregs + OPMENU0); - opmenu1 = readl(ispi->sregs + OPMENU1); - - if (opmenu0 && opmenu1) { - for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { - ispi->opcodes[i] = opmenu0 >> i * 8; - ispi->opcodes[i + 4] = opmenu1 >> i * 8; - } - } - } - - intel_spi_dump_regs(ispi); - - return 0; -} - -static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) -{ - int i; - int preop; - - if (ispi->locked) { - for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) - if (ispi->opcodes[i] == opcode) - return i; - - return -EINVAL; - } - - /* The lock is off, so just use index 0 */ - writel(opcode, ispi->sregs + OPMENU0); - preop = readw(ispi->sregs + PREOP_OPTYPE); - writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); - - return 0; -} - -static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len) -{ - u32 val, status; - int ret; - - val = readl(ispi->base + HSFSTS_CTL); - val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK); - - switch (opcode) { - case SPINOR_OP_RDID: - val |= HSFSTS_CTL_FCYCLE_RDID; - break; - case SPINOR_OP_WRSR: - val |= HSFSTS_CTL_FCYCLE_WRSR; - break; - case SPINOR_OP_RDSR: - val |= HSFSTS_CTL_FCYCLE_RDSR; - break; - default: - return -EINVAL; - } - - if (len > INTEL_SPI_FIFO_SZ) - return -EINVAL; - - val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; - val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; - val |= HSFSTS_CTL_FGO; - writel(val, ispi->base + HSFSTS_CTL); - - ret = intel_spi_wait_hw_busy(ispi); - if (ret) - return ret; - - status = readl(ispi->base + HSFSTS_CTL); - if (status & HSFSTS_CTL_FCERR) - return -EIO; - else if (status & HSFSTS_CTL_AEL) - return -EACCES; - - return 0; -} - -static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len, - int optype) -{ - u32 val = 0, status; - u8 atomic_preopcode; - int ret; - - ret = intel_spi_opcode_index(ispi, opcode, optype); - if (ret < 0) - return ret; - - if (len > INTEL_SPI_FIFO_SZ) - return -EINVAL; - - /* - * Always clear it after each SW sequencer operation regardless - * of whether it is successful or not. 
- */ - atomic_preopcode = ispi->atomic_preopcode; - ispi->atomic_preopcode = 0; - - /* Only mark 'Data Cycle' bit when there is data to be transferred */ - if (len > 0) - val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; - val |= ret << SSFSTS_CTL_COP_SHIFT; - val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; - val |= SSFSTS_CTL_SCGO; - if (atomic_preopcode) { - u16 preop; - - switch (optype) { - case OPTYPE_WRITE_NO_ADDR: - case OPTYPE_WRITE_WITH_ADDR: - /* Pick matching preopcode for the atomic sequence */ - preop = readw(ispi->sregs + PREOP_OPTYPE); - if ((preop & 0xff) == atomic_preopcode) - ; /* Do nothing */ - else if ((preop >> 8) == atomic_preopcode) - val |= SSFSTS_CTL_SPOP; - else - return -EINVAL; - - /* Enable atomic sequence */ - val |= SSFSTS_CTL_ACS; - break; - - default: - return -EINVAL; - } - - } - writel(val, ispi->sregs + SSFSTS_CTL); - - ret = intel_spi_wait_sw_busy(ispi); - if (ret) - return ret; - - status = readl(ispi->sregs + SSFSTS_CTL); - if (status & SSFSTS_CTL_FCERR) - return -EIO; - else if (status & SSFSTS_CTL_AEL) - return -EACCES; - - return 0; -} - -static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, - size_t len) -{ - struct intel_spi *ispi = nor->priv; - int ret; - - /* Address of the first chip */ - writel(0, ispi->base + FADDR); - - if (ispi->swseq_reg) - ret = intel_spi_sw_cycle(ispi, opcode, len, - OPTYPE_READ_NO_ADDR); - else - ret = intel_spi_hw_cycle(ispi, opcode, len); - - if (ret) - return ret; - - return intel_spi_read_block(ispi, buf, len); -} - -static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, - size_t len) -{ - struct intel_spi *ispi = nor->priv; - int ret; - - /* - * This is handled with atomic operation and preop code in Intel - * controller so we only verify that it is available. If the - * controller is not locked, program the opcode to the PREOP - * register for later use. - * - * When hardware sequencer is used there is no need to program - * any opcodes (it handles them automatically as part of a command). - */ - if (opcode == SPINOR_OP_WREN) { - u16 preop; - - if (!ispi->swseq_reg) - return 0; - - preop = readw(ispi->sregs + PREOP_OPTYPE); - if ((preop & 0xff) != opcode && (preop >> 8) != opcode) { - if (ispi->locked) - return -EINVAL; - writel(opcode, ispi->sregs + PREOP_OPTYPE); - } - - /* - * This enables atomic sequence on next SW sycle. Will - * be cleared after next operation. - */ - ispi->atomic_preopcode = opcode; - return 0; - } - - /* - * We hope that HW sequencer will do the right thing automatically and - * with the SW sequencer we cannot use preopcode anyway, so just ignore - * the Write Disable operation and pretend it was completed - * successfully. - */ - if (opcode == SPINOR_OP_WRDI) - return 0; - - writel(0, ispi->base + FADDR); - - /* Write the value beforehand */ - ret = intel_spi_write_block(ispi, buf, len); - if (ret) - return ret; - - if (ispi->swseq_reg) - return intel_spi_sw_cycle(ispi, opcode, len, - OPTYPE_WRITE_NO_ADDR); - return intel_spi_hw_cycle(ispi, opcode, len); -} - -static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, - u_char *read_buf) -{ - struct intel_spi *ispi = nor->priv; - size_t block_size, retlen = 0; - u32 val, status; - ssize_t ret; - - /* - * Atomic sequence is not expected with HW sequencer reads. Make - * sure it is cleared regardless. 
- */ - if (WARN_ON_ONCE(ispi->atomic_preopcode)) - ispi->atomic_preopcode = 0; - - switch (nor->read_opcode) { - case SPINOR_OP_READ: - case SPINOR_OP_READ_FAST: - case SPINOR_OP_READ_4B: - case SPINOR_OP_READ_FAST_4B: - break; - default: - return -EINVAL; - } - - while (len > 0) { - block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); - - /* Read cannot cross 4K boundary */ - block_size = min_t(loff_t, from + block_size, - round_up(from + 1, SZ_4K)) - from; - - writel(from, ispi->base + FADDR); - - val = readl(ispi->base + HSFSTS_CTL); - val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); - val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; - val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; - val |= HSFSTS_CTL_FCYCLE_READ; - val |= HSFSTS_CTL_FGO; - writel(val, ispi->base + HSFSTS_CTL); - - ret = intel_spi_wait_hw_busy(ispi); - if (ret) - return ret; - - status = readl(ispi->base + HSFSTS_CTL); - if (status & HSFSTS_CTL_FCERR) - ret = -EIO; - else if (status & HSFSTS_CTL_AEL) - ret = -EACCES; - - if (ret < 0) { - dev_err(ispi->dev, "read error: %llx: %#x\n", from, - status); - return ret; - } - - ret = intel_spi_read_block(ispi, read_buf, block_size); - if (ret) - return ret; - - len -= block_size; - from += block_size; - retlen += block_size; - read_buf += block_size; - } - - return retlen; -} - -static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, - const u_char *write_buf) -{ - struct intel_spi *ispi = nor->priv; - size_t block_size, retlen = 0; - u32 val, status; - ssize_t ret; - - /* Not needed with HW sequencer write, make sure it is cleared */ - ispi->atomic_preopcode = 0; - - while (len > 0) { - block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); - - /* Write cannot cross 4K boundary */ - block_size = min_t(loff_t, to + block_size, - round_up(to + 1, SZ_4K)) - to; - - writel(to, ispi->base + FADDR); - - val = readl(ispi->base + HSFSTS_CTL); - val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); - val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; - val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; - val |= HSFSTS_CTL_FCYCLE_WRITE; - - ret = intel_spi_write_block(ispi, write_buf, block_size); - if (ret) { - dev_err(ispi->dev, "failed to write block\n"); - return ret; - } - - /* Start the write now */ - val |= HSFSTS_CTL_FGO; - writel(val, ispi->base + HSFSTS_CTL); - - ret = intel_spi_wait_hw_busy(ispi); - if (ret) { - dev_err(ispi->dev, "timeout\n"); - return ret; - } - - status = readl(ispi->base + HSFSTS_CTL); - if (status & HSFSTS_CTL_FCERR) - ret = -EIO; - else if (status & HSFSTS_CTL_AEL) - ret = -EACCES; - - if (ret < 0) { - dev_err(ispi->dev, "write error: %llx: %#x\n", to, - status); - return ret; - } - - len -= block_size; - to += block_size; - retlen += block_size; - write_buf += block_size; - } - - return retlen; -} - -static int intel_spi_erase(struct spi_nor *nor, loff_t offs) -{ - size_t erase_size, len = nor->mtd.erasesize; - struct intel_spi *ispi = nor->priv; - u32 val, status, cmd; - int ret; - - /* If the hardware can do 64k erase use that when possible */ - if (len >= SZ_64K && ispi->erase_64k) { - cmd = HSFSTS_CTL_FCYCLE_ERASE_64K; - erase_size = SZ_64K; - } else { - cmd = HSFSTS_CTL_FCYCLE_ERASE; - erase_size = SZ_4K; - } - - if (ispi->swseq_erase) { - while (len > 0) { - writel(offs, ispi->base + FADDR); - - ret = intel_spi_sw_cycle(ispi, nor->erase_opcode, - 0, OPTYPE_WRITE_WITH_ADDR); - if (ret) - return ret; - - offs += erase_size; - len -= erase_size; - } - - return 0; - } - - /* Not needed with HW 
sequencer erase, make sure it is cleared */ - ispi->atomic_preopcode = 0; - - while (len > 0) { - writel(offs, ispi->base + FADDR); - - val = readl(ispi->base + HSFSTS_CTL); - val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); - val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; - val |= cmd; - val |= HSFSTS_CTL_FGO; - writel(val, ispi->base + HSFSTS_CTL); - - ret = intel_spi_wait_hw_busy(ispi); - if (ret) - return ret; - - status = readl(ispi->base + HSFSTS_CTL); - if (status & HSFSTS_CTL_FCERR) - return -EIO; - else if (status & HSFSTS_CTL_AEL) - return -EACCES; - - offs += erase_size; - len -= erase_size; - } - - return 0; -} - -static bool intel_spi_is_protected(const struct intel_spi *ispi, - unsigned int base, unsigned int limit) -{ - int i; - - for (i = 0; i < ispi->pr_num; i++) { - u32 pr_base, pr_limit, pr_value; - - pr_value = readl(ispi->pregs + PR(i)); - if (!(pr_value & (PR_WPE | PR_RPE))) - continue; - - pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; - pr_base = pr_value & PR_BASE_MASK; - - if (pr_base >= base && pr_limit <= limit) - return true; - } - - return false; -} - -/* - * There will be a single partition holding all enabled flash regions. We - * call this "BIOS". - */ -static void intel_spi_fill_partition(struct intel_spi *ispi, - struct mtd_partition *part) -{ - u64 end; - int i; - - memset(part, 0, sizeof(*part)); - - /* Start from the mandatory descriptor region */ - part->size = 4096; - part->name = "BIOS"; - - /* - * Now try to find where this partition ends based on the flash - * region registers. - */ - for (i = 1; i < ispi->nregions; i++) { - u32 region, base, limit; - - region = readl(ispi->base + FREG(i)); - base = region & FREG_BASE_MASK; - limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; - - if (base >= limit || limit == 0) - continue; - - /* - * If any of the regions have protection bits set, make the - * whole partition read-only to be on the safe side. - * - * Also if the user did not ask the chip to be writeable - * mask the bit too. 
- */ - if (!writeable || intel_spi_is_protected(ispi, base, limit)) - part->mask_flags |= MTD_WRITEABLE; - - end = (limit << 12) + 4096; - if (end > part->size) - part->size = end; - } -} - -static const struct spi_nor_controller_ops intel_spi_controller_ops = { - .read_reg = intel_spi_read_reg, - .write_reg = intel_spi_write_reg, - .read = intel_spi_read, - .write = intel_spi_write, - .erase = intel_spi_erase, -}; - -struct intel_spi *intel_spi_probe(struct device *dev, - struct resource *mem, const struct intel_spi_boardinfo *info) -{ - const struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ | - SNOR_HWCAPS_READ_FAST | - SNOR_HWCAPS_PP, - }; - struct mtd_partition part; - struct intel_spi *ispi; - int ret; - - if (!info || !mem) - return ERR_PTR(-EINVAL); - - ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL); - if (!ispi) - return ERR_PTR(-ENOMEM); - - ispi->base = devm_ioremap_resource(dev, mem); - if (IS_ERR(ispi->base)) - return ERR_CAST(ispi->base); - - ispi->dev = dev; - ispi->info = info; - - ret = intel_spi_init(ispi); - if (ret) - return ERR_PTR(ret); - - ispi->nor.dev = ispi->dev; - ispi->nor.priv = ispi; - ispi->nor.controller_ops = &intel_spi_controller_ops; - - ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps); - if (ret) { - dev_info(dev, "failed to locate the chip\n"); - return ERR_PTR(ret); - } - - intel_spi_fill_partition(ispi, &part); - - ret = mtd_device_register(&ispi->nor.mtd, &part, 1); - if (ret) - return ERR_PTR(ret); - - return ispi; -} -EXPORT_SYMBOL_GPL(intel_spi_probe); - -int intel_spi_remove(struct intel_spi *ispi) -{ - return mtd_device_unregister(&ispi->nor.mtd); -} -EXPORT_SYMBOL_GPL(intel_spi_remove); - -MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver"); -MODULE_AUTHOR("Mika Westerberg "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h deleted file mode 100644 index f2871179fd34..000000000000 --- a/drivers/mtd/spi-nor/controllers/intel-spi.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Intel PCH/PCU SPI flash driver. - * - * Copyright (C) 2016, Intel Corporation - * Author: Mika Westerberg - */ - -#ifndef INTEL_SPI_H -#define INTEL_SPI_H - -#include - -struct intel_spi; -struct resource; - -struct intel_spi *intel_spi_probe(struct device *dev, - struct resource *mem, const struct intel_spi_boardinfo *info); -int intel_spi_remove(struct intel_spi *ispi); - -#endif /* INTEL_SPI_H */ diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index b2a8821971e1..0201257511fb 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -427,6 +427,45 @@ config SPI_INGENIC To compile this driver as a module, choose M here: the module will be called spi-ingenic. +config SPI_INTEL + tristate + +config SPI_INTEL_PCI + tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)" + depends on PCI + depends on X86 || COMPILE_TEST + depends on SPI_MEM + select SPI_INTEL + help + This enables PCI support for the Intel PCH/PCU SPI controller in + master mode. This controller is present in modern Intel hardware + and is used to hold BIOS and other persistent settings. Using + this driver it is possible to upgrade BIOS directly from Linux. + + Say N here unless you know what you are doing. Overwriting the + SPI flash may render the system unbootable. + + To compile this driver as a module, choose M here: the module + will be called spi-intel-pci. 
+ +config SPI_INTEL_PLATFORM + tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)" + depends on X86 || COMPILE_TEST + depends on SPI_MEM + select SPI_INTEL + help + This enables platform support for the Intel PCH/PCU SPI + controller in master mode. This controller is present in modern + Intel hardware and is used to hold BIOS and other persistent + settings. Using this driver it is possible to upgrade BIOS + directly from Linux. + + Say N here unless you know what you are doing. Overwriting the + SPI flash may render the system unbootable. + + To compile this driver as a module, choose M here: the module + will be called spi-intel-platform. + config SPI_JCORE tristate "J-Core SPI Master" depends on OF && (SUPERH || COMPILE_TEST) diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index dd7393a6046f..36b2045f08d2 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -61,6 +61,9 @@ obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o obj-$(CONFIG_SPI_IMX) += spi-imx.o obj-$(CONFIG_SPI_INGENIC) += spi-ingenic.o +obj-$(CONFIG_SPI_INTEL) += spi-intel.o +obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o +obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o obj-$(CONFIG_SPI_JCORE) += spi-jcore.o obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c new file mode 100644 index 000000000000..a9cb4d77ffe3 --- /dev/null +++ b/drivers/spi/spi-intel-pci.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel PCH/PCU SPI flash PCI driver. + * + * Copyright (C) 2016 - 2022, Intel Corporation + * Author: Mika Westerberg + */ + +#include +#include + +#include "spi-intel.h" + +#define BCR 0xdc +#define BCR_WPD BIT(0) + +static bool intel_spi_pci_set_writeable(void __iomem *base, void *data) +{ + struct pci_dev *pdev = data; + u32 bcr; + + /* Try to make the chip read/write */ + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + +static const struct intel_spi_boardinfo bxt_info = { + .type = INTEL_SPI_BXT, + .set_writeable = intel_spi_pci_set_writeable, +}; + +static const struct intel_spi_boardinfo cnl_info = { + .type = INTEL_SPI_CNL, + .set_writeable = intel_spi_pci_set_writeable, +}; + +static int intel_spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct intel_spi_boardinfo *info; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->data = pdev; + return intel_spi_probe(&pdev->dev, &pdev->resource[0], info); +} + +static const struct pci_device_id intel_spi_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info }, + { 
PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info }, + { }, +}; +MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); + +static struct pci_driver intel_spi_pci_driver = { + .name = "intel-spi", + .id_table = intel_spi_pci_ids, + .probe = intel_spi_pci_probe, +}; + +module_pci_driver(intel_spi_pci_driver); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash PCI driver"); +MODULE_AUTHOR("Mika Westerberg "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-intel-platform.c b/drivers/spi/spi-intel-platform.c new file mode 100644 index 000000000000..2ef09fa35661 --- /dev/null +++ b/drivers/spi/spi-intel-platform.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel PCH/PCU SPI flash platform driver. + * + * Copyright (C) 2016 - 2022, Intel Corporation + * Author: Mika Westerberg + */ + +#include +#include + +#include "spi-intel.h" + +static int intel_spi_platform_probe(struct platform_device *pdev) +{ + struct intel_spi_boardinfo *info; + struct resource *mem; + + info = dev_get_platdata(&pdev->dev); + if (!info) + return -EINVAL; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + return intel_spi_probe(&pdev->dev, mem, info); +} + +static struct platform_driver intel_spi_platform_driver = { + .probe = intel_spi_platform_probe, + .driver = { + .name = "intel-spi", + }, +}; + +module_platform_driver(intel_spi_platform_driver); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver"); +MODULE_AUTHOR("Mika Westerberg "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:intel-spi"); diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c new file mode 100644 index 000000000000..e937cfe85559 --- /dev/null +++ b/drivers/spi/spi-intel.c @@ -0,0 +1,1250 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel PCH/PCU SPI flash driver. 
+ * + * Copyright (C) 2016 - 2022, Intel Corporation + * Author: Mika Westerberg + */ + +#include +#include + +#include +#include + +#include +#include +#include + +#include "spi-intel.h" + +/* Offsets are from @ispi->base */ +#define BFPREG 0x00 + +#define HSFSTS_CTL 0x04 +#define HSFSTS_CTL_FSMIE BIT(31) +#define HSFSTS_CTL_FDBC_SHIFT 24 +#define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT) + +#define HSFSTS_CTL_FCYCLE_SHIFT 17 +#define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT) +/* HW sequencer opcodes */ +#define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT) +#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT) + +#define HSFSTS_CTL_FGO BIT(16) +#define HSFSTS_CTL_FLOCKDN BIT(15) +#define HSFSTS_CTL_FDV BIT(14) +#define HSFSTS_CTL_SCIP BIT(5) +#define HSFSTS_CTL_AEL BIT(2) +#define HSFSTS_CTL_FCERR BIT(1) +#define HSFSTS_CTL_FDONE BIT(0) + +#define FADDR 0x08 +#define DLOCK 0x0c +#define FDATA(n) (0x10 + ((n) * 4)) + +#define FRACC 0x50 + +#define FREG(n) (0x54 + ((n) * 4)) +#define FREG_BASE_MASK 0x3fff +#define FREG_LIMIT_SHIFT 16 +#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT) + +/* Offset is from @ispi->pregs */ +#define PR(n) ((n) * 4) +#define PR_WPE BIT(31) +#define PR_LIMIT_SHIFT 16 +#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) +#define PR_RPE BIT(15) +#define PR_BASE_MASK 0x3fff + +/* Offsets are from @ispi->sregs */ +#define SSFSTS_CTL 0x00 +#define SSFSTS_CTL_FSMIE BIT(23) +#define SSFSTS_CTL_DS BIT(22) +#define SSFSTS_CTL_DBC_SHIFT 16 +#define SSFSTS_CTL_SPOP BIT(11) +#define SSFSTS_CTL_ACS BIT(10) +#define SSFSTS_CTL_SCGO BIT(9) +#define SSFSTS_CTL_COP_SHIFT 12 +#define SSFSTS_CTL_FRS BIT(7) +#define SSFSTS_CTL_DOFRS BIT(6) +#define SSFSTS_CTL_AEL BIT(4) +#define SSFSTS_CTL_FCERR BIT(3) +#define SSFSTS_CTL_FDONE BIT(2) +#define SSFSTS_CTL_SCIP BIT(0) + +#define PREOP_OPTYPE 0x04 +#define OPMENU0 0x08 +#define OPMENU1 0x0c + +#define OPTYPE_READ_NO_ADDR 0 +#define OPTYPE_WRITE_NO_ADDR 1 +#define OPTYPE_READ_WITH_ADDR 2 +#define OPTYPE_WRITE_WITH_ADDR 3 + +/* CPU specifics */ +#define BYT_PR 0x74 +#define BYT_SSFSTS_CTL 0x90 +#define BYT_FREG_NUM 5 +#define BYT_PR_NUM 5 + +#define LPT_PR 0x74 +#define LPT_SSFSTS_CTL 0x90 +#define LPT_FREG_NUM 5 +#define LPT_PR_NUM 5 + +#define BXT_PR 0x84 +#define BXT_SSFSTS_CTL 0xa0 +#define BXT_FREG_NUM 12 +#define BXT_PR_NUM 6 + +#define CNL_PR 0x84 +#define CNL_FREG_NUM 6 +#define CNL_PR_NUM 5 + +#define LVSCC 0xc4 +#define UVSCC 0xc8 +#define ERASE_OPCODE_SHIFT 8 +#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) +#define ERASE_64K_OPCODE_SHIFT 16 +#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) + +#define INTEL_SPI_TIMEOUT 5000 /* ms */ +#define INTEL_SPI_FIFO_SZ 64 + +/** + * struct intel_spi - Driver private data + * @dev: Device pointer + * @info: Pointer to board specific info + * @base: Beginning of MMIO space + * @pregs: Start of protection registers + * @sregs: Start of software sequencer registers + * @master: Pointer to the SPI controller structure + * @nregions: Maximum number of regions + * @pr_num: Maximum number of protected range registers + * @locked: Is SPI setting locked + * @swseq_reg: Use SW sequencer in register 
reads/writes + * @swseq_erase: Use SW sequencer in erase operation + * @atomic_preopcode: Holds preopcode when atomic sequence is requested + * @opcodes: Opcodes which are supported. This are programmed by BIOS + * before it locks down the controller. + * @mem_ops: Pointer to SPI MEM ops supported by the controller + */ +struct intel_spi { + struct device *dev; + const struct intel_spi_boardinfo *info; + void __iomem *base; + void __iomem *pregs; + void __iomem *sregs; + struct spi_controller *master; + size_t nregions; + size_t pr_num; + bool locked; + bool swseq_reg; + bool swseq_erase; + u8 atomic_preopcode; + u8 opcodes[8]; + const struct intel_spi_mem_op *mem_ops; +}; + +struct intel_spi_mem_op { + struct spi_mem_op mem_op; + u32 replacement_op; + int (*exec_op)(struct intel_spi *ispi, + const struct intel_spi_mem_op *iop, + const struct spi_mem_op *op); +}; + +static bool writeable; +module_param(writeable, bool, 0); +MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)"); + +static void intel_spi_dump_regs(struct intel_spi *ispi) +{ + u32 value; + int i; + + dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG)); + + value = readl(ispi->base + HSFSTS_CTL); + dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value); + if (value & HSFSTS_CTL_FLOCKDN) + dev_dbg(ispi->dev, "-> Locked\n"); + + dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR)); + dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK)); + + for (i = 0; i < 16; i++) + dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n", + i, readl(ispi->base + FDATA(i))); + + dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC)); + + for (i = 0; i < ispi->nregions; i++) + dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, + readl(ispi->base + FREG(i))); + for (i = 0; i < ispi->pr_num; i++) + dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, + readl(ispi->pregs + PR(i))); + + if (ispi->sregs) { + value = readl(ispi->sregs + SSFSTS_CTL); + dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value); + dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n", + readl(ispi->sregs + PREOP_OPTYPE)); + dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", + readl(ispi->sregs + OPMENU0)); + dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", + readl(ispi->sregs + OPMENU1)); + } + + dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC)); + dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC)); + + dev_dbg(ispi->dev, "Protected regions:\n"); + for (i = 0; i < ispi->pr_num; i++) { + u32 base, limit; + + value = readl(ispi->pregs + PR(i)); + if (!(value & (PR_WPE | PR_RPE))) + continue; + + limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; + base = value & PR_BASE_MASK; + + dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n", + i, base << 12, (limit << 12) | 0xfff, + value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.'); + } + + dev_dbg(ispi->dev, "Flash regions:\n"); + for (i = 0; i < ispi->nregions; i++) { + u32 region, base, limit; + + region = readl(ispi->base + FREG(i)); + base = region & FREG_BASE_MASK; + limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; + + if (base >= limit || (i > 0 && limit == 0)) + dev_dbg(ispi->dev, " %02d disabled\n", i); + else + dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n", + i, base << 12, (limit << 12) | 0xfff); + } + + dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", + ispi->swseq_reg ? 'S' : 'H'); + dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", + ispi->swseq_erase ? 
'S' : 'H'); +} + +/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ +static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size) +{ + size_t bytes; + int i = 0; + + if (size > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + while (size > 0) { + bytes = min_t(size_t, size, 4); + memcpy_fromio(buf, ispi->base + FDATA(i), bytes); + size -= bytes; + buf += bytes; + i++; + } + + return 0; +} + +/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */ +static int intel_spi_write_block(struct intel_spi *ispi, const void *buf, + size_t size) +{ + size_t bytes; + int i = 0; + + if (size > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + while (size > 0) { + bytes = min_t(size_t, size, 4); + memcpy_toio(ispi->base + FDATA(i), buf, bytes); + size -= bytes; + buf += bytes; + i++; + } + + return 0; +} + +static int intel_spi_wait_hw_busy(struct intel_spi *ispi) +{ + u32 val; + + return readl_poll_timeout(ispi->base + HSFSTS_CTL, val, + !(val & HSFSTS_CTL_SCIP), 0, + INTEL_SPI_TIMEOUT * 1000); +} + +static int intel_spi_wait_sw_busy(struct intel_spi *ispi) +{ + u32 val; + + return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val, + !(val & SSFSTS_CTL_SCIP), 0, + INTEL_SPI_TIMEOUT * 1000); +} + +static bool intel_spi_set_writeable(struct intel_spi *ispi) +{ + if (!ispi->info->set_writeable) + return false; + + return ispi->info->set_writeable(ispi->base, ispi->info->data); +} + +static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) +{ + int i; + int preop; + + if (ispi->locked) { + for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) + if (ispi->opcodes[i] == opcode) + return i; + + return -EINVAL; + } + + /* The lock is off, so just use index 0 */ + writel(opcode, ispi->sregs + OPMENU0); + preop = readw(ispi->sregs + PREOP_OPTYPE); + writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); + + return 0; +} + +static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len) +{ + u32 val, status; + int ret; + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK); + + switch (opcode) { + case SPINOR_OP_RDID: + val |= HSFSTS_CTL_FCYCLE_RDID; + break; + case SPINOR_OP_WRSR: + val |= HSFSTS_CTL_FCYCLE_WRSR; + break; + case SPINOR_OP_RDSR: + val |= HSFSTS_CTL_FCYCLE_RDSR; + break; + default: + return -EINVAL; + } + + if (len > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; + val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + return -EIO; + else if (status & HSFSTS_CTL_AEL) + return -EACCES; + + return 0; +} + +static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len, + int optype) +{ + u32 val = 0, status; + u8 atomic_preopcode; + int ret; + + ret = intel_spi_opcode_index(ispi, opcode, optype); + if (ret < 0) + return ret; + + if (len > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + /* + * Always clear it after each SW sequencer operation regardless + * of whether it is successful or not. 
+ */ + atomic_preopcode = ispi->atomic_preopcode; + ispi->atomic_preopcode = 0; + + /* Only mark 'Data Cycle' bit when there is data to be transferred */ + if (len > 0) + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; + val |= ret << SSFSTS_CTL_COP_SHIFT; + val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; + val |= SSFSTS_CTL_SCGO; + if (atomic_preopcode) { + u16 preop; + + switch (optype) { + case OPTYPE_WRITE_NO_ADDR: + case OPTYPE_WRITE_WITH_ADDR: + /* Pick matching preopcode for the atomic sequence */ + preop = readw(ispi->sregs + PREOP_OPTYPE); + if ((preop & 0xff) == atomic_preopcode) + ; /* Do nothing */ + else if ((preop >> 8) == atomic_preopcode) + val |= SSFSTS_CTL_SPOP; + else + return -EINVAL; + + /* Enable atomic sequence */ + val |= SSFSTS_CTL_ACS; + break; + + default: + return -EINVAL; + } + } + writel(val, ispi->sregs + SSFSTS_CTL); + + ret = intel_spi_wait_sw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->sregs + SSFSTS_CTL); + if (status & SSFSTS_CTL_FCERR) + return -EIO; + else if (status & SSFSTS_CTL_AEL) + return -EACCES; + + return 0; +} + +static int intel_spi_read_reg(struct intel_spi *ispi, + const struct intel_spi_mem_op *iop, + const struct spi_mem_op *op) +{ + size_t nbytes = op->data.nbytes; + u8 opcode = op->cmd.opcode; + int ret; + + /* Address of the first chip */ + writel(0, ispi->base + FADDR); + + if (ispi->swseq_reg) + ret = intel_spi_sw_cycle(ispi, opcode, nbytes, + OPTYPE_READ_NO_ADDR); + else + ret = intel_spi_hw_cycle(ispi, opcode, nbytes); + + if (ret) + return ret; + + return intel_spi_read_block(ispi, op->data.buf.in, nbytes); +} + +static int intel_spi_write_reg(struct intel_spi *ispi, + const struct intel_spi_mem_op *iop, + const struct spi_mem_op *op) +{ + size_t nbytes = op->data.nbytes; + u8 opcode = op->cmd.opcode; + int ret; + + /* + * This is handled with atomic operation and preop code in Intel + * controller so we only verify that it is available. If the + * controller is not locked, program the opcode to the PREOP + * register for later use. + * + * When hardware sequencer is used there is no need to program + * any opcodes (it handles them automatically as part of a command). + */ + if (opcode == SPINOR_OP_WREN) { + u16 preop; + + if (!ispi->swseq_reg) + return 0; + + preop = readw(ispi->sregs + PREOP_OPTYPE); + if ((preop & 0xff) != opcode && (preop >> 8) != opcode) { + if (ispi->locked) + return -EINVAL; + writel(opcode, ispi->sregs + PREOP_OPTYPE); + } + + /* + * This enables atomic sequence on next SW sycle. Will + * be cleared after next operation. + */ + ispi->atomic_preopcode = opcode; + return 0; + } + + /* + * We hope that HW sequencer will do the right thing automatically and + * with the SW sequencer we cannot use preopcode anyway, so just ignore + * the Write Disable operation and pretend it was completed + * successfully. 
+ */ + if (opcode == SPINOR_OP_WRDI) + return 0; + + writel(0, ispi->base + FADDR); + + /* Write the value beforehand */ + ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes); + if (ret) + return ret; + + if (ispi->swseq_reg) + return intel_spi_sw_cycle(ispi, opcode, nbytes, + OPTYPE_WRITE_NO_ADDR); + return intel_spi_hw_cycle(ispi, opcode, nbytes); +} + +static int intel_spi_read(struct intel_spi *ispi, + const struct intel_spi_mem_op *iop, + const struct spi_mem_op *op) +{ + void *read_buf = op->data.buf.in; + size_t block_size, nbytes = op->data.nbytes; + u32 addr = op->addr.val; + u32 val, status; + int ret; + + /* + * Atomic sequence is not expected with HW sequencer reads. Make + * sure it is cleared regardless. + */ + if (WARN_ON_ONCE(ispi->atomic_preopcode)) + ispi->atomic_preopcode = 0; + + while (nbytes > 0) { + block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ); + + /* Read cannot cross 4K boundary */ + block_size = min_t(loff_t, addr + block_size, + round_up(addr + 1, SZ_4K)) - addr; + + writel(addr, ispi->base + FADDR); + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; + val |= HSFSTS_CTL_FCYCLE_READ; + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + ret = -EIO; + else if (status & HSFSTS_CTL_AEL) + ret = -EACCES; + + if (ret < 0) { + dev_err(ispi->dev, "read error: %x: %#x\n", addr, status); + return ret; + } + + ret = intel_spi_read_block(ispi, read_buf, block_size); + if (ret) + return ret; + + nbytes -= block_size; + addr += block_size; + read_buf += block_size; + } + + return 0; +} + +static int intel_spi_write(struct intel_spi *ispi, + const struct intel_spi_mem_op *iop, + const struct spi_mem_op *op) +{ + size_t block_size, nbytes = op->data.nbytes; + const void *write_buf = op->data.buf.out; + u32 addr = op->addr.val; + u32 val, status; + int ret; + + /* Not needed with HW sequencer write, make sure it is cleared */ + ispi->atomic_preopcode = 0; + + while (nbytes > 0) { + block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ); + + /* Write cannot cross 4K boundary */ + block_size = min_t(loff_t, addr + block_size, + round_up(addr + 1, SZ_4K)) - addr; + + writel(addr, ispi->base + FADDR); + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; + val |= HSFSTS_CTL_FCYCLE_WRITE; + + ret = intel_spi_write_block(ispi, write_buf, block_size); + if (ret) { + dev_err(ispi->dev, "failed to write block\n"); + return ret; + } + + /* Start the write now */ + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) { + dev_err(ispi->dev, "timeout\n"); + return ret; + } + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + ret = -EIO; + else if (status & HSFSTS_CTL_AEL) + ret = -EACCES; + + if (ret < 0) { + dev_err(ispi->dev, "write error: %x: %#x\n", addr, status); + return ret; + } + + nbytes -= block_size; + addr += block_size; + write_buf += block_size; + } + + return 0; +} + +static int intel_spi_erase(struct intel_spi *ispi, + const struct intel_spi_mem_op *iop, + const struct spi_mem_op *op) +{ + u8 opcode = 
op->cmd.opcode; + u32 addr = op->addr.val; + u32 val, status; + int ret; + + writel(addr, ispi->base + FADDR); + + if (ispi->swseq_erase) + return intel_spi_sw_cycle(ispi, opcode, 0, + OPTYPE_WRITE_WITH_ADDR); + + /* Not needed with HW sequencer erase, make sure it is cleared */ + ispi->atomic_preopcode = 0; + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= HSFSTS_CTL_FGO; + val |= iop->replacement_op; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + return -EIO; + if (status & HSFSTS_CTL_AEL) + return -EACCES; + + return 0; +} + +static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop, + const struct spi_mem_op *op) +{ + if (iop->mem_op.cmd.nbytes != op->cmd.nbytes || + iop->mem_op.cmd.buswidth != op->cmd.buswidth || + iop->mem_op.cmd.dtr != op->cmd.dtr || + iop->mem_op.cmd.opcode != op->cmd.opcode) + return false; + + if (iop->mem_op.addr.nbytes != op->addr.nbytes || + iop->mem_op.addr.dtr != op->addr.dtr) + return false; + + if (iop->mem_op.data.dir != op->data.dir || + iop->mem_op.data.dtr != op->data.dtr) + return false; + + if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) { + if (iop->mem_op.data.buswidth != op->data.buswidth) + return false; + } + + return true; +} + +static const struct intel_spi_mem_op * +intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op) +{ + const struct intel_spi_mem_op *iop; + + for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) { + if (intel_spi_cmp_mem_op(iop, op)) + break; + } + + return iop->mem_op.cmd.opcode ? iop : NULL; +} + +static bool intel_spi_supports_mem_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master); + const struct intel_spi_mem_op *iop; + + iop = intel_spi_match_mem_op(ispi, op); + if (!iop) { + dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode); + return false; + } + + /* + * For software sequencer check that the opcode is actually + * present in the opmenu if it is locked. + */ + if (ispi->swseq_reg && ispi->locked) { + int i; + + /* Check if it is in the locked opcodes list */ + for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) { + if (ispi->opcodes[i] == op->cmd.opcode) + return true; + } + + dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode); + return false; + } + + return true; +} + +static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master); + const struct intel_spi_mem_op *iop; + + iop = intel_spi_match_mem_op(ispi, op); + if (!iop) + return -EOPNOTSUPP; + + return iop->exec_op(ispi, iop, op); +} + +static const char *intel_spi_get_name(struct spi_mem *mem) +{ + const struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master); + + /* + * Return name of the flash controller device to be compatible + * with the MTD version. 
+ */ + return dev_name(ispi->dev); +} + +static const struct spi_controller_mem_ops intel_spi_mem_ops = { + .supports_op = intel_spi_supports_mem_op, + .exec_op = intel_spi_exec_mem_op, + .get_name = intel_spi_get_name, +}; + +#define INTEL_SPI_OP_ADDR(__nbytes) \ + { \ + .nbytes = __nbytes, \ + } + +#define INTEL_SPI_OP_NO_DATA \ + { \ + .dir = SPI_MEM_NO_DATA, \ + } + +#define INTEL_SPI_OP_DATA_IN(__buswidth) \ + { \ + .dir = SPI_MEM_DATA_IN, \ + .buswidth = __buswidth, \ + } + +#define INTEL_SPI_OP_DATA_OUT(__buswidth) \ + { \ + .dir = SPI_MEM_DATA_OUT, \ + .buswidth = __buswidth, \ + } + +#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op) \ + { \ + .mem_op = { \ + .cmd = __cmd, \ + .addr = __addr, \ + .data = __data, \ + }, \ + .exec_op = __exec_op, \ + } + +#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl) \ + { \ + .mem_op = { \ + .cmd = __cmd, \ + .addr = __addr, \ + .data = __data, \ + }, \ + .exec_op = __exec_op, \ + .replacement_op = __repl, \ + } + +/* + * The controller handles pretty much everything internally based on the + * SFDP data but we want to make sure we only support the operations + * actually possible. Only check buswidth and transfer direction, the + * core validates data. + */ +#define INTEL_SPI_GENERIC_OPS \ + /* Status register operations */ \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), \ + SPI_MEM_OP_NO_ADDR, \ + INTEL_SPI_OP_DATA_IN(1), \ + intel_spi_read_reg), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), \ + SPI_MEM_OP_NO_ADDR, \ + INTEL_SPI_OP_DATA_IN(1), \ + intel_spi_read_reg), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), \ + SPI_MEM_OP_NO_ADDR, \ + INTEL_SPI_OP_DATA_OUT(1), \ + intel_spi_write_reg), \ + /* Normal read */ \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ + INTEL_SPI_OP_ADDR(3), \ + INTEL_SPI_OP_DATA_IN(1), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ + INTEL_SPI_OP_ADDR(3), \ + INTEL_SPI_OP_DATA_IN(2), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ + INTEL_SPI_OP_ADDR(3), \ + INTEL_SPI_OP_DATA_IN(4), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(1), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(2), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(4), \ + intel_spi_read), \ + /* Fast read */ \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ + INTEL_SPI_OP_ADDR(3), \ + INTEL_SPI_OP_DATA_IN(1), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ + INTEL_SPI_OP_ADDR(3), \ + INTEL_SPI_OP_DATA_IN(2), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ + INTEL_SPI_OP_ADDR(3), \ + INTEL_SPI_OP_DATA_IN(4), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(1), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(2), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(4), \ + intel_spi_read), \ + /* Read with 4-byte address opcode */ \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(1), \ + 
intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(2), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(4), \ + intel_spi_read), \ + /* Fast read with 4-byte address opcode */ \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(1), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(2), \ + intel_spi_read), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_IN(4), \ + intel_spi_read), \ + /* Write operations */ \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \ + INTEL_SPI_OP_ADDR(3), \ + INTEL_SPI_OP_DATA_OUT(1), \ + intel_spi_write), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_OUT(1), \ + intel_spi_write), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1), \ + INTEL_SPI_OP_ADDR(4), \ + INTEL_SPI_OP_DATA_OUT(1), \ + intel_spi_write), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DATA, \ + intel_spi_write_reg), \ + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DATA, \ + intel_spi_write_reg), \ + /* Erase operations */ \ + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \ + INTEL_SPI_OP_ADDR(3), \ + SPI_MEM_OP_NO_DATA, \ + intel_spi_erase, \ + HSFSTS_CTL_FCYCLE_ERASE), \ + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \ + INTEL_SPI_OP_ADDR(4), \ + SPI_MEM_OP_NO_DATA, \ + intel_spi_erase, \ + HSFSTS_CTL_FCYCLE_ERASE), \ + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1), \ + INTEL_SPI_OP_ADDR(4), \ + SPI_MEM_OP_NO_DATA, \ + intel_spi_erase, \ + HSFSTS_CTL_FCYCLE_ERASE) \ + +static const struct intel_spi_mem_op generic_mem_ops[] = { + INTEL_SPI_GENERIC_OPS, + { }, +}; + +static const struct intel_spi_mem_op erase_64k_mem_ops[] = { + INTEL_SPI_GENERIC_OPS, + /* 64k sector erase operations */ + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1), + INTEL_SPI_OP_ADDR(3), + SPI_MEM_OP_NO_DATA, + intel_spi_erase, + HSFSTS_CTL_FCYCLE_ERASE_64K), + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1), + INTEL_SPI_OP_ADDR(4), + SPI_MEM_OP_NO_DATA, + intel_spi_erase, + HSFSTS_CTL_FCYCLE_ERASE_64K), + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1), + INTEL_SPI_OP_ADDR(4), + SPI_MEM_OP_NO_DATA, + intel_spi_erase, + HSFSTS_CTL_FCYCLE_ERASE_64K), + { }, +}; + +static int intel_spi_init(struct intel_spi *ispi) +{ + u32 opmenu0, opmenu1, lvscc, uvscc, val; + bool erase_64k = false; + int i; + + switch (ispi->info->type) { + case INTEL_SPI_BYT: + ispi->sregs = ispi->base + BYT_SSFSTS_CTL; + ispi->pregs = ispi->base + BYT_PR; + ispi->nregions = BYT_FREG_NUM; + ispi->pr_num = BYT_PR_NUM; + ispi->swseq_reg = true; + break; + + case INTEL_SPI_LPT: + ispi->sregs = ispi->base + LPT_SSFSTS_CTL; + ispi->pregs = ispi->base + LPT_PR; + ispi->nregions = LPT_FREG_NUM; + ispi->pr_num = LPT_PR_NUM; + ispi->swseq_reg = true; + break; + + case INTEL_SPI_BXT: + ispi->sregs = ispi->base + BXT_SSFSTS_CTL; + ispi->pregs = ispi->base + BXT_PR; + ispi->nregions = BXT_FREG_NUM; + ispi->pr_num = BXT_PR_NUM; + erase_64k = true; + break; + + case INTEL_SPI_CNL: + ispi->sregs = NULL; + ispi->pregs = ispi->base + CNL_PR; + ispi->nregions = CNL_FREG_NUM; + 
ispi->pr_num = CNL_PR_NUM; + break; + + default: + return -EINVAL; + } + + /* Try to disable write protection if user asked to do so */ + if (writeable && !intel_spi_set_writeable(ispi)) { + dev_warn(ispi->dev, "can't disable chip write protection\n"); + writeable = false; + } + + /* Disable #SMI generation from HW sequencer */ + val = readl(ispi->base + HSFSTS_CTL); + val &= ~HSFSTS_CTL_FSMIE; + writel(val, ispi->base + HSFSTS_CTL); + + /* + * Determine whether erase operation should use HW or SW sequencer. + * + * The HW sequencer has a predefined list of opcodes, with only the + * erase opcode being programmable in LVSCC and UVSCC registers. + * If these registers don't contain a valid erase opcode, erase + * cannot be done using HW sequencer. + */ + lvscc = readl(ispi->base + LVSCC); + uvscc = readl(ispi->base + UVSCC); + if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) + ispi->swseq_erase = true; + /* SPI controller on Intel BXT supports 64K erase opcode */ + if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) + if (!(lvscc & ERASE_64K_OPCODE_MASK) || + !(uvscc & ERASE_64K_OPCODE_MASK)) + erase_64k = false; + + if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) { + dev_err(ispi->dev, "software sequencer not supported, but required\n"); + return -EINVAL; + } + + /* + * Some controllers can only do basic operations using hardware + * sequencer. All other operations are supposed to be carried out + * using software sequencer. + */ + if (ispi->swseq_reg) { + /* Disable #SMI generation from SW sequencer */ + val = readl(ispi->sregs + SSFSTS_CTL); + val &= ~SSFSTS_CTL_FSMIE; + writel(val, ispi->sregs + SSFSTS_CTL); + } + + /* Check controller's lock status */ + val = readl(ispi->base + HSFSTS_CTL); + ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); + + if (ispi->locked && ispi->sregs) { + /* + * BIOS programs allowed opcodes and then locks down the + * register. So read back what opcodes it decided to support. + * That's the set we are going to support as well. + */ + opmenu0 = readl(ispi->sregs + OPMENU0); + opmenu1 = readl(ispi->sregs + OPMENU1); + + if (opmenu0 && opmenu1) { + for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { + ispi->opcodes[i] = opmenu0 >> i * 8; + ispi->opcodes[i + 4] = opmenu1 >> i * 8; + } + } + } + + if (erase_64k) { + dev_dbg(ispi->dev, "Using erase_64k memory operations"); + ispi->mem_ops = erase_64k_mem_ops; + } else { + dev_dbg(ispi->dev, "Using generic memory operations"); + ispi->mem_ops = generic_mem_ops; + } + + intel_spi_dump_regs(ispi); + return 0; +} + +static bool intel_spi_is_protected(const struct intel_spi *ispi, + unsigned int base, unsigned int limit) +{ + int i; + + for (i = 0; i < ispi->pr_num; i++) { + u32 pr_base, pr_limit, pr_value; + + pr_value = readl(ispi->pregs + PR(i)); + if (!(pr_value & (PR_WPE | PR_RPE))) + continue; + + pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; + pr_base = pr_value & PR_BASE_MASK; + + if (pr_base >= base && pr_limit <= limit) + return true; + } + + return false; +} + +/* + * There will be a single partition holding all enabled flash regions. We + * call this "BIOS". + */ +static void intel_spi_fill_partition(struct intel_spi *ispi, + struct mtd_partition *part) +{ + u64 end; + int i; + + memset(part, 0, sizeof(*part)); + + /* Start from the mandatory descriptor region */ + part->size = 4096; + part->name = "BIOS"; + + /* + * Now try to find where this partition ends based on the flash + * region registers. 
+ */ + for (i = 1; i < ispi->nregions; i++) { + u32 region, base, limit; + + region = readl(ispi->base + FREG(i)); + base = region & FREG_BASE_MASK; + limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; + + if (base >= limit || limit == 0) + continue; + + /* + * If any of the regions have protection bits set, make the + * whole partition read-only to be on the safe side. + * + * Also if the user did not ask the chip to be writeable + * mask the bit too. + */ + if (!writeable || intel_spi_is_protected(ispi, base, limit)) + part->mask_flags |= MTD_WRITEABLE; + + end = (limit << 12) + 4096; + if (end > part->size) + part->size = end; + } +} + +static int intel_spi_populate_chip(struct intel_spi *ispi) +{ + struct flash_platform_data *pdata; + struct spi_board_info chip; + + pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->nr_parts = 1; + pdata->parts = devm_kcalloc(ispi->dev, sizeof(*pdata->parts), + pdata->nr_parts, GFP_KERNEL); + if (!pdata->parts) + return -ENOMEM; + + intel_spi_fill_partition(ispi, pdata->parts); + + memset(&chip, 0, sizeof(chip)); + snprintf(chip.modalias, 8, "spi-nor"); + chip.platform_data = pdata; + + return spi_new_device(ispi->master, &chip) ? 0 : -ENODEV; +} + +/** + * intel_spi_probe() - Probe the Intel SPI flash controller + * @dev: Pointer to the parent device + * @mem: MMIO resource + * @info: Platform specific information + * + * Probes the Intel SPI flash controller and creates the flash chip device. + * Returns %0 on success and negative errno in case of failure. + */ +int intel_spi_probe(struct device *dev, struct resource *mem, + const struct intel_spi_boardinfo *info) +{ + struct spi_controller *master; + struct intel_spi *ispi; + int ret; + + master = devm_spi_alloc_master(dev, sizeof(*ispi)); + if (!master) + return -ENOMEM; + + master->mem_ops = &intel_spi_mem_ops; + + ispi = spi_master_get_devdata(master); + + ispi->base = devm_ioremap_resource(dev, mem); + if (IS_ERR(ispi->base)) + return PTR_ERR(ispi->base); + + ispi->dev = dev; + ispi->master = master; + ispi->info = info; + + ret = intel_spi_init(ispi); + if (ret) + return ret; + + ret = devm_spi_register_master(dev, master); + if (ret) + return ret; + + return intel_spi_populate_chip(ispi); +} +EXPORT_SYMBOL_GPL(intel_spi_probe); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver"); +MODULE_AUTHOR("Mika Westerberg "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-intel.h b/drivers/spi/spi-intel.h new file mode 100644 index 000000000000..a4f0327a46ff --- /dev/null +++ b/drivers/spi/spi-intel.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Intel PCH/PCU SPI flash driver.
+ * + * Copyright (C) 2016 - 2022, Intel Corporation + * Author: Mika Westerberg + */ + +#ifndef SPI_INTEL_H +#define SPI_INTEL_H + +#include <linux/platform_data/x86/spi-intel.h> + +struct resource; + +int intel_spi_probe(struct device *dev, struct resource *mem, + const struct intel_spi_boardinfo *info); + +#endif /* SPI_INTEL_H */ diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h index 39967a5eca6d..ea4a4b1b246a 100644 --- a/include/linux/mfd/lpc_ich.h +++ b/include/linux/mfd/lpc_ich.h @@ -8,7 +8,7 @@ #ifndef LPC_ICH_H #define LPC_ICH_H -#include <linux/platform_data/x86/intel-spi.h> +#include <linux/platform_data/x86/spi-intel.h> /* GPIO resources */ #define ICH_RES_GPIO 0 diff --git a/include/linux/platform_data/x86/intel-spi.h b/include/linux/platform_data/x86/intel-spi.h deleted file mode 100644 index 7dda3f690465..000000000000 --- a/include/linux/platform_data/x86/intel-spi.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Intel PCH/PCU SPI flash driver. - * - * Copyright (C) 2016, Intel Corporation - * Author: Mika Westerberg - */ - -#ifndef INTEL_SPI_PDATA_H -#define INTEL_SPI_PDATA_H - -enum intel_spi_type { - INTEL_SPI_BYT = 1, - INTEL_SPI_LPT, - INTEL_SPI_BXT, - INTEL_SPI_CNL, -}; - -/** - * struct intel_spi_boardinfo - Board specific data for Intel SPI driver - * @type: Type which this controller is compatible with - * @set_writeable: Try to make the chip writeable (optional) - * @data: Data to be passed to @set_writeable can be %NULL - */ -struct intel_spi_boardinfo { - enum intel_spi_type type; - bool (*set_writeable)(void __iomem *base, void *data); - void *data; -}; - -#endif /* INTEL_SPI_PDATA_H */ diff --git a/include/linux/platform_data/x86/spi-intel.h b/include/linux/platform_data/x86/spi-intel.h new file mode 100644 index 000000000000..a512ec37abbb --- /dev/null +++ b/include/linux/platform_data/x86/spi-intel.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Intel PCH/PCU SPI flash driver. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg + */ + +#ifndef SPI_INTEL_PDATA_H +#define SPI_INTEL_PDATA_H + +enum intel_spi_type { + INTEL_SPI_BYT = 1, + INTEL_SPI_LPT, + INTEL_SPI_BXT, + INTEL_SPI_CNL, +}; + +/** + * struct intel_spi_boardinfo - Board specific data for Intel SPI driver + * @type: Type which this controller is compatible with + * @set_writeable: Try to make the chip writeable (optional) + * @data: Data to be passed to @set_writeable can be %NULL + */ +struct intel_spi_boardinfo { + enum intel_spi_type type; + bool (*set_writeable)(void __iomem *base, void *data); + void *data; +}; + +#endif /* SPI_INTEL_PDATA_H */ -- cgit v1.2.3-71-gd317 From f48dc6b9664963107e500aecfc2f4df27dc5afb6 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 11 Feb 2022 00:19:54 +0100 Subject: spi: Retire legacy GPIO handling All drivers using GPIOs as chip select have been rewritten to use GPIO descriptors, passing the ->use_gpio_descriptors flag. Retire the code and fields used by the legacy GPIO API. Do not drop the ->use_gpio_descriptors flag: it now only indicates that we want to use GPIOs in addition to native chip selects.
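For context, a controller driver opts in to descriptor-based chip selects by setting this flag before registration; the core then does the lookup on its behalf. The following is a minimal sketch only, not taken from this patch: the "foo" probe function, private struct and device are hypothetical.

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_spi_priv { void __iomem *base; };

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_spi_priv));
	if (!ctlr)
		return -ENOMEM;

	/* Ask the SPI core to parse chip selects into GPIO descriptors */
	ctlr->use_gpio_descriptors = true;

	return devm_spi_register_master(&pdev->dev, ctlr);
}

With the flag set, spi_register_controller() calls spi_get_gpio_descs() and fills ctlr->cs_gpiods, so devices get cs_gpiod assigned and the driver never deals with legacy GPIO numbers.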
Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20220210231954.807904-1-linus.walleij@linaro.org Signed-off-by: Mark Brown --- drivers/spi/spi.c | 125 +++++++++++------------------------------------- include/linux/spi/spi.h | 14 +----- 2 files changed, 30 insertions(+), 109 deletions(-) (limited to 'include/linux') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index d9832d8e9f44..85f8ae4cc0c0 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -542,7 +541,6 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr) spi->dev.parent = &ctlr->dev; spi->dev.bus = &spi_bus_type; spi->dev.release = spidev_release; - spi->cs_gpio = -ENOENT; spi->mode = ctlr->buswidth_override_bits; spin_lock_init(&spi->statistics.lock); @@ -606,11 +604,8 @@ static int __spi_add_device(struct spi_device *spi) return -ENODEV; } - /* Descriptors take precedence */ if (ctlr->cs_gpiods) spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; - else if (ctlr->cs_gpios) - spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; /* * Drivers may modify this initial i/o setup, but will @@ -940,39 +935,30 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) spi->controller->last_cs_enable = enable; spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; - if ((spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) || - !spi->controller->set_cs_timing) && !activate) { + if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) { spi_delay_exec(&spi->cs_hold, NULL); } if (spi->mode & SPI_CS_HIGH) enable = !enable; - if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) { + if (spi->cs_gpiod) { if (!(spi->mode & SPI_NO_CS)) { - if (spi->cs_gpiod) { - /* - * Historically ACPI has no means of the GPIO polarity and - * thus the SPISerialBus() resource defines it on the per-chip - * basis. In order to avoid a chain of negations, the GPIO - * polarity is considered being Active High. Even for the cases - * when _DSD() is involved (in the updated versions of ACPI) - * the GPIO CS polarity must be defined Active High to avoid - * ambiguity. That's why we use enable, that takes SPI_CS_HIGH - * into account. - */ - if (has_acpi_companion(&spi->dev)) - gpiod_set_value_cansleep(spi->cs_gpiod, !enable); - else - /* Polarity handled by GPIO library */ - gpiod_set_value_cansleep(spi->cs_gpiod, activate); - } else { - /* - * Invert the enable line, as active low is - * default for SPI. - */ - gpio_set_value_cansleep(spi->cs_gpio, !enable); - } + /* + * Historically ACPI has no means of the GPIO polarity and + * thus the SPISerialBus() resource defines it on the per-chip + * basis. In order to avoid a chain of negations, the GPIO + * polarity is considered being Active High. Even for the cases + * when _DSD() is involved (in the updated versions of ACPI) + * the GPIO CS polarity must be defined Active High to avoid + * ambiguity. That's why we use enable, that takes SPI_CS_HIGH + * into account. 
+ */ + if (has_acpi_companion(&spi->dev)) + gpiod_set_value_cansleep(spi->cs_gpiod, !enable); + else + /* Polarity handled by GPIO library */ + gpiod_set_value_cansleep(spi->cs_gpiod, activate); } /* Some SPI masters need both GPIO CS & slave_select */ if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && @@ -982,8 +968,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) spi->controller->set_cs(spi, !enable); } - if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) || - !spi->controller->set_cs_timing) { + if (spi->cs_gpiod || !spi->controller->set_cs_timing) { if (activate) spi_delay_exec(&spi->cs_setup, NULL); else @@ -2827,46 +2812,6 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, } EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); -#ifdef CONFIG_OF -static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) -{ - int nb, i, *cs; - struct device_node *np = ctlr->dev.of_node; - - if (!np) - return 0; - - nb = of_gpio_named_count(np, "cs-gpios"); - ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); - - /* Return error only for an incorrectly formed cs-gpios property */ - if (nb == 0 || nb == -ENOENT) - return 0; - else if (nb < 0) - return nb; - - cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), - GFP_KERNEL); - ctlr->cs_gpios = cs; - - if (!ctlr->cs_gpios) - return -ENOMEM; - - for (i = 0; i < ctlr->num_chipselect; i++) - cs[i] = -ENOENT; - - for (i = 0; i < nb; i++) - cs[i] = of_get_named_gpio(np, "cs-gpios", i); - - return 0; -} -#else -static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) -{ - return 0; -} -#endif - /** * spi_get_gpio_descs() - grab chip select GPIOs for the master * @ctlr: The SPI master to grab GPIO descriptors for @@ -3051,22 +2996,15 @@ int spi_register_controller(struct spi_controller *ctlr) */ dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); - if (!spi_controller_is_slave(ctlr)) { - if (ctlr->use_gpio_descriptors) { - status = spi_get_gpio_descs(ctlr); - if (status) - goto free_bus_id; - /* - * A controller using GPIO descriptors always - * supports SPI_CS_HIGH if need be. - */ - ctlr->mode_bits |= SPI_CS_HIGH; - } else { - /* Legacy code path for GPIOs from DT */ - status = of_spi_get_gpio_numbers(ctlr); - if (status) - goto free_bus_id; - } + if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) { + status = spi_get_gpio_descs(ctlr); + if (status) + goto free_bus_id; + /* + * A controller using GPIO descriptors always + * supports SPI_CS_HIGH if need be. + */ + ctlr->mode_bits |= SPI_CS_HIGH; } /* @@ -3555,12 +3493,6 @@ int spi_setup(struct spi_device *spi) */ bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | SPI_NO_TX | SPI_NO_RX); - /* - * Nothing prevents from working with active-high CS in case if it - * is driven by GPIO. - */ - if (gpio_is_valid(spi->cs_gpio)) - bad_bits &= ~SPI_CS_HIGH; ugly_bits = bad_bits & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); @@ -3686,8 +3618,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * cs_change is set for each transfer. 
*/ if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || - spi->cs_gpiod || - gpio_is_valid(spi->cs_gpio))) { + spi->cs_gpiod)) { size_t maxsize; int ret; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index c3746ff35691..579d71cdf6fa 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -137,9 +137,6 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * for driver coldplugging, and in uevents used for hotplugging * @driver_override: If the name of a driver is written to this attribute, then * the device will bind to the named driver and only the named driver. - * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when - * not using a GPIO line) use cs_gpiod in new drivers by opting in on - * the spi_master. * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when * not using a GPIO line) * @word_delay: delay to be inserted between consecutive @@ -186,7 +183,6 @@ struct spi_device { void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; - int cs_gpio; /* LEGACY: chip select gpio */ struct gpio_desc *cs_gpiod; /* chip select gpio desc */ struct spi_delay word_delay; /* inter-word delay */ /* CS delays */ @@ -418,17 +414,12 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * controller has native support for memory like operations. * @unprepare_message: undo any work done by prepare_message(). * @slave_abort: abort the ongoing transfer request on an SPI slave controller - * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per - * CS number. Any individual value may be -ENOENT for CS lines that - * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods - * in new drivers. * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS * number. Any individual value may be NULL for CS lines that * are not GPIOs (driven by the SPI controller itself). * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab - * GPIO descriptors rather than using global GPIO numbers grabbed by the - * driver. This will fill in @cs_gpiods and @cs_gpios should not be used, - * and SPI devices will have the cs_gpiod assigned rather than cs_gpio. + * GPIO descriptors. This will fill in @cs_gpiods and SPI devices will have + * the cs_gpiod assigned if a GPIO line is found for the chipselect. * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will * fill in this field with the first unused native CS, to be used by SPI * controller drivers that need to drive a native CS when using GPIO CS. @@ -642,7 +633,6 @@ struct spi_controller { const struct spi_controller_mem_ops *mem_ops; /* gpio chip select */ - int *cs_gpios; struct gpio_desc **cs_gpiods; bool use_gpio_descriptors; s8 unused_native_cs; -- cgit v1.2.3-71-gd317 From 1de785a58035031f81a43c42f355fa1db510dc36 Mon Sep 17 00:00:00 2001 From: Jeff LaBundy Date: Sun, 23 Jan 2022 13:01:05 -0600 Subject: mfd: iqs62x: Provide device revision to sub-devices Individual sub-devices may elect to make decisions based on the specific revision of silicon encountered at probe. This data is already read from the device, but is not retained. Pass this data on to the sub-devices by adding the software and hardware numbers (registers 0x01 and 0x02, respectively) to the iqs62x_core struct. 
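As a sketch of how this retained data might be consumed, a sub-device can reach the parent's core structure and branch on the hardware number. The child driver below is hypothetical; iqs62x_core, hw_num and IQS620_HW_NUM_V3 come from the patch itself, and fetching the core via the parent's driver data is the usual MFD pattern.

#include <linux/mfd/iqs62x.h>
#include <linux/platform_device.h>

static int iqs620_child_probe(struct platform_device *pdev)
{
	/* The parent MFD device holds the shared iqs62x_core state */
	struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);

	/* Apply a quirk only on V3 silicon and later */
	if (iqs62x->hw_num >= IQS620_HW_NUM_V3) {
		/* revision-specific configuration would go here */
	}

	return 0;
}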
Signed-off-by: Jeff LaBundy Signed-off-by: Lee Jones --- drivers/mfd/iqs62x.c | 6 +++--- include/linux/mfd/iqs62x.h | 7 +++++++ 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c index 9805cf191245..575ab67e243d 100644 --- a/drivers/mfd/iqs62x.c +++ b/drivers/mfd/iqs62x.c @@ -898,7 +898,6 @@ static int iqs62x_probe(struct i2c_client *client) struct iqs62x_info info; unsigned int val; int ret, i, j; - u8 sw_num = 0; const char *fw_name = NULL; iqs62x = devm_kzalloc(&client->dev, sizeof(*iqs62x), GFP_KERNEL); @@ -949,7 +948,8 @@ static int iqs62x_probe(struct i2c_client *client) if (info.sw_num < iqs62x->dev_desc->sw_num) continue; - sw_num = info.sw_num; + iqs62x->sw_num = info.sw_num; + iqs62x->hw_num = info.hw_num; /* * Read each of the device's designated calibration registers, @@ -985,7 +985,7 @@ static int iqs62x_probe(struct i2c_client *client) return -EINVAL; } - if (!sw_num) { + if (!iqs62x->sw_num) { dev_err(&client->dev, "Unrecognized software number: 0x%02X\n", info.sw_num); return -EINVAL; diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h index 5ced55eae11b..ffc86010af74 100644 --- a/include/linux/mfd/iqs62x.h +++ b/include/linux/mfd/iqs62x.h @@ -14,6 +14,11 @@ #define IQS624_PROD_NUM 0x43 #define IQS625_PROD_NUM 0x4E +#define IQS620_HW_NUM_V0 0x82 +#define IQS620_HW_NUM_V1 IQS620_HW_NUM_V0 +#define IQS620_HW_NUM_V2 IQS620_HW_NUM_V0 +#define IQS620_HW_NUM_V3 0x92 + #define IQS621_ALS_FLAGS 0x16 #define IQS622_ALS_FLAGS 0x14 @@ -129,6 +134,8 @@ struct iqs62x_core { struct completion fw_done; enum iqs62x_ui_sel ui_sel; unsigned long event_cache; + u8 sw_num; + u8 hw_num; }; extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS]; -- cgit v1.2.3-71-gd317 From 5891cd5ec46c2c2eb6427cb54d214b149635dd0e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 11 Feb 2022 12:06:23 -0800 Subject: net_sched: add __rcu annotation to netdev->qdisc syzbot found a data-race [1] which led me to add __rcu annotations to netdev->qdisc, and proper accessors to get LOCKDEP support.
[1] BUG: KCSAN: data-race in dev_activate / qdisc_lookup_rcu write to 0xffff888168ad6410 of 8 bytes by task 13559 on cpu 1: attach_default_qdiscs net/sched/sch_generic.c:1167 [inline] dev_activate+0x2ed/0x8f0 net/sched/sch_generic.c:1221 __dev_open+0x2e9/0x3a0 net/core/dev.c:1416 __dev_change_flags+0x167/0x3f0 net/core/dev.c:8139 rtnl_configure_link+0xc2/0x150 net/core/rtnetlink.c:3150 __rtnl_newlink net/core/rtnetlink.c:3489 [inline] rtnl_newlink+0xf4d/0x13e0 net/core/rtnetlink.c:3529 rtnetlink_rcv_msg+0x745/0x7e0 net/core/rtnetlink.c:5594 netlink_rcv_skb+0x14e/0x250 net/netlink/af_netlink.c:2494 rtnetlink_rcv+0x18/0x20 net/core/rtnetlink.c:5612 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x602/0x6d0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x728/0x850 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:705 [inline] sock_sendmsg net/socket.c:725 [inline] ____sys_sendmsg+0x39a/0x510 net/socket.c:2413 ___sys_sendmsg net/socket.c:2467 [inline] __sys_sendmsg+0x195/0x230 net/socket.c:2496 __do_sys_sendmsg net/socket.c:2505 [inline] __se_sys_sendmsg net/socket.c:2503 [inline] __x64_sys_sendmsg+0x42/0x50 net/socket.c:2503 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x44/0xd0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae read to 0xffff888168ad6410 of 8 bytes by task 13560 on cpu 0: qdisc_lookup_rcu+0x30/0x2e0 net/sched/sch_api.c:323 __tcf_qdisc_find+0x74/0x3a0 net/sched/cls_api.c:1050 tc_del_tfilter+0x1c7/0x1350 net/sched/cls_api.c:2211 rtnetlink_rcv_msg+0x5ba/0x7e0 net/core/rtnetlink.c:5585 netlink_rcv_skb+0x14e/0x250 net/netlink/af_netlink.c:2494 rtnetlink_rcv+0x18/0x20 net/core/rtnetlink.c:5612 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x602/0x6d0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x728/0x850 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:705 [inline] sock_sendmsg net/socket.c:725 [inline] ____sys_sendmsg+0x39a/0x510 net/socket.c:2413 ___sys_sendmsg net/socket.c:2467 [inline] __sys_sendmsg+0x195/0x230 net/socket.c:2496 __do_sys_sendmsg net/socket.c:2505 [inline] __se_sys_sendmsg net/socket.c:2503 [inline] __x64_sys_sendmsg+0x42/0x50 net/socket.c:2503 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x44/0xd0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae value changed: 0xffffffff85dee080 -> 0xffff88815d96ec00 Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 13560 Comm: syz-executor.2 Not tainted 5.17.0-rc3-syzkaller-00116-gf1baf68e1383-dirty #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Fixes: 470502de5bdb ("net: sched: unlock rules update API") Signed-off-by: Eric Dumazet Cc: Vlad Buslov Reported-by: syzbot Cc: Jamal Hadi Salim Cc: Cong Wang Cc: Jiri Pirko Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 2 +- net/core/rtnetlink.c | 6 ++++-- net/sched/cls_api.c | 6 +++--- net/sched/sch_api.c | 22 ++++++++++++---------- net/sched/sch_generic.c | 29 ++++++++++++++++------------- 5 files changed, 36 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e490b84732d1..8b5a314db167 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2158,7 +2158,7 @@ struct net_device { struct netdev_queue *_tx ____cacheline_aligned_in_smp; unsigned int num_tx_queues; unsigned int real_num_tx_queues; - struct Qdisc *qdisc; + struct Qdisc __rcu *qdisc; unsigned int tx_queue_len; spinlock_t tx_global_lock; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 710da8a36729..2fb8eb6791e8 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1699,6 +1699,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, { struct ifinfomsg *ifm; struct nlmsghdr *nlh; + struct Qdisc *qdisc; ASSERT_RTNL(); nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); @@ -1716,6 +1717,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid)) goto nla_put_failure; + qdisc = rtnl_dereference(dev->qdisc); if (nla_put_string(skb, IFLA_IFNAME, dev->name) || nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || nla_put_u8(skb, IFLA_OPERSTATE, @@ -1735,8 +1737,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, #endif put_master_ifindex(skb, dev) || nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || - (dev->qdisc && - nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || + (qdisc && + nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) || nla_put_ifalias(skb, dev) || nla_put_u32(skb, IFLA_CARRIER_CHANGES, atomic_read(&dev->carrier_up_count) + diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 5f0f346b576f..5ce1208a6ea3 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1044,7 +1044,7 @@ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q, /* Find qdisc */ if (!*parent) { - *q = dev->qdisc; + *q = rcu_dereference(dev->qdisc); *parent = (*q)->handle; } else { *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); @@ -2587,7 +2587,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) parent = tcm->tcm_parent; if (!parent) - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); else q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); if (!q) @@ -2962,7 +2962,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; if (!tcm->tcm_parent) - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); else q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 179825a3b2fd..e3c0e8ea2dbb 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -301,7 +301,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) if (!handle) return NULL; - q = qdisc_match_from_root(dev->qdisc, handle); + q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle); if (q) goto out; @@ -320,7 +320,7 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) if (!handle) return NULL; - q = qdisc_match_from_root(dev->qdisc, handle); + q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle); if (q) goto out; @@ -1082,10 +1082,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, skip: if (!ingress) { notify_and_destroy(net, skb, n, classid, - 
dev->qdisc, new); + rtnl_dereference(dev->qdisc), new); if (new && !new->ops->attach) qdisc_refcount_inc(new); - dev->qdisc = new ? : &noop_qdisc; + rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc); if (new && new->ops->attach) new->ops->attach(new); @@ -1451,7 +1451,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); } if (!q) { NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device"); @@ -1540,7 +1540,7 @@ replay: q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); } /* It may be default qdisc, ignore it */ @@ -1762,7 +1762,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) s_q_idx = 0; q_idx = 0; - if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx, + if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc), + skb, cb, &q_idx, s_q_idx, true, tca[TCA_DUMP_INVISIBLE]) < 0) goto done; @@ -2033,7 +2034,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, } else if (qid1) { qid = qid1; } else if (qid == 0) - qid = dev->qdisc->handle; + qid = rtnl_dereference(dev->qdisc)->handle; /* Now qid is genuine qdisc handle consistent * both with parent and child. @@ -2044,7 +2045,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, portid = TC_H_MAKE(qid, portid); } else { if (qid == 0) - qid = dev->qdisc->handle; + qid = rtnl_dereference(dev->qdisc)->handle; } /* OK. Locate qdisc */ @@ -2205,7 +2206,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; t = 0; - if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0) + if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc), + skb, tcm, cb, &t, s_t, true) < 0) goto done; dev_queue = dev_ingress_queue(dev); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index f893d9a81b01..5bab9f8b8f45 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -1164,30 +1164,33 @@ static void attach_default_qdiscs(struct net_device *dev) if (!netif_is_multiqueue(dev) || dev->priv_flags & IFF_NO_QUEUE) { netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); - dev->qdisc = txq->qdisc_sleeping; - qdisc_refcount_inc(dev->qdisc); + qdisc = txq->qdisc_sleeping; + rcu_assign_pointer(dev->qdisc, qdisc); + qdisc_refcount_inc(qdisc); } else { qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); if (qdisc) { - dev->qdisc = qdisc; + rcu_assign_pointer(dev->qdisc, qdisc); qdisc->ops->attach(qdisc); } } + qdisc = rtnl_dereference(dev->qdisc); /* Detect default qdisc setup/init failed and fallback to "noqueue" */ - if (dev->qdisc == &noop_qdisc) { + if (qdisc == &noop_qdisc) { netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n", default_qdisc_ops->id, noqueue_qdisc_ops.id); dev->priv_flags |= IFF_NO_QUEUE; netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); - dev->qdisc = txq->qdisc_sleeping; - qdisc_refcount_inc(dev->qdisc); + qdisc = txq->qdisc_sleeping; + rcu_assign_pointer(dev->qdisc, qdisc); + qdisc_refcount_inc(qdisc); dev->priv_flags ^= IFF_NO_QUEUE; } #ifdef CONFIG_NET_SCHED - if (dev->qdisc != &noop_qdisc) - qdisc_hash_add(dev->qdisc, false); + if (qdisc != &noop_qdisc) + qdisc_hash_add(qdisc, false); #endif } @@ -1217,7 +1220,7 @@ void dev_activate(struct net_device *dev) * and noqueue_qdisc for virtual interfaces */ - if (dev->qdisc == &noop_qdisc) + if (rtnl_dereference(dev->qdisc) 
== &noop_qdisc) attach_default_qdiscs(dev); if (!netif_carrier_ok(dev)) @@ -1383,7 +1386,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev, void dev_qdisc_change_real_num_tx(struct net_device *dev, unsigned int new_real_tx) { - struct Qdisc *qdisc = dev->qdisc; + struct Qdisc *qdisc = rtnl_dereference(dev->qdisc); if (qdisc->ops->change_real_num_tx) qdisc->ops->change_real_num_tx(qdisc, new_real_tx); @@ -1447,7 +1450,7 @@ static void dev_init_scheduler_queue(struct net_device *dev, void dev_init_scheduler(struct net_device *dev) { - dev->qdisc = &noop_qdisc; + rcu_assign_pointer(dev->qdisc, &noop_qdisc); netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); @@ -1475,8 +1478,8 @@ void dev_shutdown(struct net_device *dev) netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); - qdisc_put(dev->qdisc); - dev->qdisc = &noop_qdisc; + qdisc_put(rtnl_dereference(dev->qdisc)); + rcu_assign_pointer(dev->qdisc, &noop_qdisc); WARN_ON(timer_pending(&dev->watchdog_timer)); } -- cgit v1.2.3-71-gd317 From baebdf48c360080710f80699eea3affbb13d6c65 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sat, 12 Feb 2022 00:38:38 +0100 Subject: net: dev: Make sure netif_rx() can be invoked in any context. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dave suggested a while ago (eleven years by now) "Let's make netif_rx() work in all contexts and get rid of netif_rx_ni()". Eric agreed and pointed out that modern devices should use netif_receive_skb() to avoid the overhead. In the meantime someone added another variant, netif_rx_any_context(), which behaves as suggested. netif_rx() must be invoked with disabled bottom halves to ensure that pending softirqs, which were raised within the function, are handled. netif_rx_ni() can be invoked only from process context (bottom halves must be enabled) because the function handles pending softirqs without checking if bottom halves were disabled or not. netif_rx_any_context() dispatches to one of the former functions by checking in_interrupt(). netif_rx() could be taught to handle both cases (disabled and enabled bottom halves) by simply disabling bottom halves while invoking netif_rx_internal(). The local_bh_enable() invocation will then invoke pending softirqs only if the BH-disable counter drops to zero. Eric is concerned about the overhead of BH-disable+enable, especially in regard to the loopback driver. As critical as this driver is, it will receive a shortcut to avoid the additional overhead which is not needed. Add a local_bh_disable() section in netif_rx() to ensure softirqs are handled if needed. Provide __netif_rx() which does not disable BH and has a lockdep assert to ensure that interrupts are disabled. Use this shortcut in the loopback driver and in drivers/net/*.c. Make netif_rx_ni() and netif_rx_any_context() invoke netif_rx() so they can be removed once there are no more users left. Link: https://lkml.kernel.org/r/20100415.020246.218622820.davem@davemloft.net Signed-off-by: Sebastian Andrzej Siewior Reviewed-by: Eric Dumazet Reviewed-by: Toke Høiland-Jørgensen Signed-off-by: David S.
Miller --- drivers/net/amt.c | 4 +-- drivers/net/geneve.c | 4 +-- drivers/net/gtp.c | 2 +- drivers/net/loopback.c | 4 +-- drivers/net/macsec.c | 6 ++--- drivers/net/macvlan.c | 4 +-- drivers/net/mhi_net.c | 2 +- drivers/net/ntb_netdev.c | 2 +- drivers/net/rionet.c | 2 +- drivers/net/sb1000.c | 2 +- drivers/net/veth.c | 2 +- drivers/net/vrf.c | 2 +- drivers/net/vxlan.c | 2 +- include/linux/netdevice.h | 14 ++++++++-- include/trace/events/net.h | 14 ---------- net/core/dev.c | 67 ++++++++++++++++++++-------------------------- 16 files changed, 60 insertions(+), 73 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/amt.c b/drivers/net/amt.c index f1a36d7e2151..10455c9b9da0 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -2373,7 +2373,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt, skb->pkt_type = PACKET_MULTICAST; skb->ip_summed = CHECKSUM_NONE; len = skb->len; - if (netif_rx(skb) == NET_RX_SUCCESS) { + if (__netif_rx(skb) == NET_RX_SUCCESS) { amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true); dev_sw_netstats_rx_add(amt->dev, len); } else { @@ -2470,7 +2470,7 @@ report: skb->pkt_type = PACKET_MULTICAST; skb->ip_summed = CHECKSUM_NONE; len = skb->len; - if (netif_rx(skb) == NET_RX_SUCCESS) { + if (__netif_rx(skb) == NET_RX_SUCCESS) { amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE, true); dev_sw_netstats_rx_add(amt->dev, len); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index c1fdd721a730..a895ff756093 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -925,7 +925,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, } skb->protocol = eth_type_trans(skb, geneve->dev); - netif_rx(skb); + __netif_rx(skb); dst_release(&rt->dst); return -EMSGSIZE; } @@ -1021,7 +1021,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, } skb->protocol = eth_type_trans(skb, geneve->dev); - netif_rx(skb); + __netif_rx(skb); dst_release(dst); return -EMSGSIZE; } diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 24e5c54d06c1..bf087171bcf0 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -207,7 +207,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, dev_sw_netstats_rx_add(pctx->dev, skb->len); - netif_rx(skb); + __netif_rx(skb); return 0; err: diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index ed0edf5884ef..d05f86fe78c9 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -78,7 +78,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, skb_orphan(skb); - /* Before queueing this packet to netif_rx(), + /* Before queueing this packet to __netif_rx(), * make sure dst is refcounted. 
*/ skb_dst_force(skb); @@ -86,7 +86,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, skb->protocol = eth_type_trans(skb, dev); len = skb->len; - if (likely(netif_rx(skb) == NET_RX_SUCCESS)) + if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) dev_lstats_add(dev, len); return NETDEV_TX_OK; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 3d0874331763..832f09ac075e 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1033,7 +1033,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) else nskb->pkt_type = PACKET_MULTICAST; - netif_rx(nskb); + __netif_rx(nskb); } continue; } @@ -1056,7 +1056,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) nskb->dev = ndev; - if (netif_rx(nskb) == NET_RX_SUCCESS) { + if (__netif_rx(nskb) == NET_RX_SUCCESS) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsUntagged++; u64_stats_update_end(&secy_stats->syncp); @@ -1288,7 +1288,7 @@ nosci: macsec_reset_skb(nskb, macsec->secy.netdev); - ret = netif_rx(nskb); + ret = __netif_rx(nskb); if (ret == NET_RX_SUCCESS) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsUnknownSCI++; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6ef5f77be4d0..d87c06c317ed 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -410,7 +410,7 @@ static void macvlan_forward_source_one(struct sk_buff *skb, if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr)) nskb->pkt_type = PACKET_HOST; - ret = netif_rx(nskb); + ret = __netif_rx(nskb); macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false); } @@ -468,7 +468,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) /* forward to original port. */ vlan = src; ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?: - netif_rx(skb); + __netif_rx(skb); handle_res = RX_HANDLER_CONSUMED; goto out; } diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c index aaa628f859fd..0b1b6f650104 100644 --- a/drivers/net/mhi_net.c +++ b/drivers/net/mhi_net.c @@ -225,7 +225,7 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev, u64_stats_inc(&mhi_netdev->stats.rx_packets); u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); u64_stats_update_end(&mhi_netdev->stats.rx_syncp); - netif_rx(skb); + __netif_rx(skb); } /* Refill if RX buffers queue becomes low */ diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 98ca6b18415e..80bdc07f2cd3 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; - if (netif_rx(skb) == NET_RX_DROP) { + if (__netif_rx(skb) == NET_RX_DROP) { ndev->stats.rx_errors++; ndev->stats.rx_dropped++; } else { diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 1a95f3beb784..39e61e07e489 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -109,7 +109,7 @@ static int rionet_rx_clean(struct net_device *ndev) skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); rnet->rx_skb[i]->protocol = eth_type_trans(rnet->rx_skb[i], ndev); - error = netif_rx(rnet->rx_skb[i]); + error = __netif_rx(rnet->rx_skb[i]); if (error == NET_RX_DROP) { ndev->stats.rx_dropped++; diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 57a6d598467b..c3f8020571ad 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -872,7 +872,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3 
/* datagram completed: send to upper level */ skb_trim(skb, dlen); - netif_rx(skb); + __netif_rx(skb); stats->rx_bytes+=dlen; stats->rx_packets++; lp->rx_skb[ns] = NULL; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index d29fb9759cc9..58b20ea171dd 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -287,7 +287,7 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb, { return __dev_forward_skb(dev, skb) ?: xdp ? veth_xdp_rx(rq, skb) : - netif_rx(skb); + __netif_rx(skb); } /* return true if the specified skb has chances of GRO aggregation diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index e0b1ab99a359..714cafcf6c6c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -418,7 +418,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, skb->protocol = eth_type_trans(skb, dev); - if (likely(netif_rx(skb) == NET_RX_SUCCESS)) + if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) vrf_rx_stats(dev, len); else this_cpu_inc(dev->dstats->rx_drps); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 359d16780dbb..d0dc90d3dac2 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2541,7 +2541,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, tx_stats->tx_bytes += len; u64_stats_update_end(&tx_stats->syncp); - if (netif_rx(skb) == NET_RX_SUCCESS) { + if (__netif_rx(skb) == NET_RX_SUCCESS) { u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++; rx_stats->rx_bytes += len; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5f6e2c0b0c90..e99db8341da5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3672,8 +3672,18 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); int netif_rx(struct sk_buff *skb); -int netif_rx_ni(struct sk_buff *skb); -int netif_rx_any_context(struct sk_buff *skb); +int __netif_rx(struct sk_buff *skb); + +static inline int netif_rx_ni(struct sk_buff *skb) +{ + return netif_rx(skb); +} + +static inline int netif_rx_any_context(struct sk_buff *skb) +{ + return netif_rx(skb); +} + int netif_receive_skb(struct sk_buff *skb); int netif_receive_skb_core(struct sk_buff *skb); void netif_receive_skb_list_internal(struct list_head *head); diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 78c448c6ab4c..032b431b987b 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -260,13 +260,6 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry, TP_ARGS(skb) ); -DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry, - - TP_PROTO(const struct sk_buff *skb), - - TP_ARGS(skb) -); - DECLARE_EVENT_CLASS(net_dev_rx_exit_template, TP_PROTO(int ret), @@ -312,13 +305,6 @@ DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit, TP_ARGS(ret) ); -DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit, - - TP_PROTO(int ret), - - TP_ARGS(ret) -); - DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit, TP_PROTO(int ret), diff --git a/net/core/dev.c b/net/core/dev.c index 441bacf79199..ce5047e8db1f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4815,66 +4815,57 @@ static int netif_rx_internal(struct sk_buff *skb) return ret; } +/** + * __netif_rx - Slightly optimized version of netif_rx + * @skb: buffer to post + * + * This behaves as netif_rx except that it does not disable bottom halves. 
+ * As a result this function may only be invoked from the interrupt context + * (either hard or soft interrupt). + */ +int __netif_rx(struct sk_buff *skb) +{ + int ret; + + lockdep_assert_once(hardirq_count() | softirq_count()); + + trace_netif_rx_entry(skb); + ret = netif_rx_internal(skb); + trace_netif_rx_exit(ret); + return ret; +} +EXPORT_SYMBOL(__netif_rx); + /** * netif_rx - post buffer to the network code * @skb: buffer to post * * This function receives a packet from a device driver and queues it for - * the upper (protocol) levels to process. It always succeeds. The buffer - * may be dropped during processing for congestion control or by the - * protocol layers. + * the upper (protocol) levels to process via the backlog NAPI device. It + * always succeeds. The buffer may be dropped during processing for + * congestion control or by the protocol layers. + * The network buffer is passed via the backlog NAPI device. Modern NIC + * drivers should use NAPI and GRO. + * This function can be used from any context. * * return values: * NET_RX_SUCCESS (no congestion) * NET_RX_DROP (packet was dropped) * */ - int netif_rx(struct sk_buff *skb) { int ret; + local_bh_disable(); trace_netif_rx_entry(skb); - ret = netif_rx_internal(skb); trace_netif_rx_exit(ret); - + local_bh_enable(); return ret; } EXPORT_SYMBOL(netif_rx); -int netif_rx_ni(struct sk_buff *skb) -{ - int err; - - trace_netif_rx_ni_entry(skb); - - preempt_disable(); - err = netif_rx_internal(skb); - if (local_softirq_pending()) - do_softirq(); - preempt_enable(); - trace_netif_rx_ni_exit(err); - - return err; -} -EXPORT_SYMBOL(netif_rx_ni); - -int netif_rx_any_context(struct sk_buff *skb) -{ - /* - * If invoked from contexts which do not invoke bottom half - * processing either at return from interrupt or when softrqs are - * reenabled, use netif_rx_ni() which invokes bottomhalf processing - * directly. - */ - if (in_interrupt()) - return netif_rx(skb); - else - return netif_rx_ni(skb); -} -EXPORT_SYMBOL(netif_rx_any_context); - static __latent_entropy void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); -- cgit v1.2.3-71-gd317 From 76f05d88623e559eb9fc41db9fb911e67fab0e7a Mon Sep 17 00:00:00 2001 From: M Chetan Kumar Date: Mon, 14 Feb 2022 12:46:52 +0530 Subject: net: wwan: debugfs obtained dev reference not dropped The WWAN driver calls wwan_get_debugfs_dir() to obtain the WWAN debugfs dir entry. As part of this procedure it returns a reference to a found device. Since there is no debugfs interface available in the WWAN subsystem, it is not possible to drop the dev reference post debugfs use. This leads to side effects: for example, after a wwan driver load and reload, the wwan instance gets incremented from wwanX to wwanX+1. A new debugfs interface is added in the wwan subsystem so that the wwan driver can drop the obtained dev reference post debugfs use. void wwan_put_debugfs_dir(struct dentry *dir) Signed-off-by: M Chetan Kumar Signed-off-by: David S.
Miller --- drivers/net/wwan/wwan_core.c | 36 ++++++++++++++++++++++++++++++++++++ include/linux/wwan.h | 2 ++ 2 files changed, 38 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 1508dc2a497b..b8c7843730ed 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -160,6 +160,42 @@ struct dentry *wwan_get_debugfs_dir(struct device *parent) return wwandev->debugfs_dir; } EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir); + +static int wwan_dev_debugfs_match(struct device *dev, const void *dir) +{ + struct wwan_device *wwandev; + + if (dev->type != &wwan_dev_type) + return 0; + + wwandev = to_wwan_dev(dev); + + return wwandev->debugfs_dir == dir; +} + +static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir) +{ + struct device *dev; + + dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match); + if (!dev) + return ERR_PTR(-ENODEV); + + return to_wwan_dev(dev); +} + +void wwan_put_debugfs_dir(struct dentry *dir) +{ + struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir); + + if (WARN_ON(IS_ERR(wwandev))) + return; + + /* wwan_dev_get_by_debugfs() also got a reference */ + put_device(&wwandev->dev); + put_device(&wwandev->dev); +} +EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir); #endif /* This function allocates and registers a new WWAN device OR if a WWAN device diff --git a/include/linux/wwan.h b/include/linux/wwan.h index afb3334ec8c5..5ce2acf444fb 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -174,11 +174,13 @@ void wwan_unregister_ops(struct device *parent); #ifdef CONFIG_WWAN_DEBUGFS struct dentry *wwan_get_debugfs_dir(struct device *parent); +void wwan_put_debugfs_dir(struct dentry *dir); #else static inline struct dentry *wwan_get_debugfs_dir(struct device *parent) { return ERR_PTR(-ENODEV); } +static inline void wwan_put_debugfs_dir(struct dentry *dir) {} #endif #endif /* __WWAN_H */ -- cgit v1.2.3-71-gd317 From 32e92d9f6f876721cc4f565f8369d542b6266380 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 3 Feb 2022 17:59:20 +0800 Subject: iommu/iova: Separate out rcache init Currently the rcache structures are allocated for all IOVA domains, even if they do not use the "fast" alloc+free interface. This is wasteful of memory. In addition, failures in init_iova_rcaches() are not handled safely, which is less than ideal. Make "fast" users call a separate rcache init explicitly, which includes error checking. Signed-off-by: John Garry Reviewed-by: Robin Murphy Acked-by: Michael S.
Tsirkin Link: https://lore.kernel.org/r/1643882360-241739-1-git-send-email-john.garry@huawei.com Signed-off-by: Joerg Roedel --- drivers/iommu/dma-iommu.c | 4 ++ drivers/iommu/iova.c | 73 +++++++++++++++++++++++++++++------- drivers/vdpa/vdpa_user/iova_domain.c | 11 ++++++ include/linux/iova.h | 15 ++------ 4 files changed, 77 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index d85d54f2b549..b22034975301 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -525,6 +525,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, struct iommu_dma_cookie *cookie = domain->iova_cookie; unsigned long order, base_pfn; struct iova_domain *iovad; + int ret; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; @@ -559,6 +560,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, } init_iova_domain(iovad, 1UL << order, base_pfn); + ret = iova_domain_init_rcaches(iovad); + if (ret) + return ret; /* If the FQ fails we can simply fall back to strict mode */ if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain)) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b28c9435b898..7e9c3a97c040 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -15,13 +15,14 @@ /* The anchor node sits above the top of the usable address space */ #define IOVA_ANCHOR ~0UL +#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ + static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, unsigned long size); static unsigned long iova_rcache_get(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn); -static void init_iova_rcaches(struct iova_domain *iovad); static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); static void free_iova_rcaches(struct iova_domain *iovad); @@ -64,8 +65,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR; rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node); rb_insert_color(&iovad->anchor.node, &iovad->rbroot); - cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead); - init_iova_rcaches(iovad); } EXPORT_SYMBOL_GPL(init_iova_domain); @@ -488,6 +487,13 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) } EXPORT_SYMBOL_GPL(free_iova_fast); +static void iova_domain_free_rcaches(struct iova_domain *iovad) +{ + cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, + &iovad->cpuhp_dead); + free_iova_rcaches(iovad); +} + /** * put_iova_domain - destroys the iova domain * @iovad: - iova domain in question. 
@@ -497,9 +503,9 @@ void put_iova_domain(struct iova_domain *iovad) { struct iova *iova, *tmp; - cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, - &iovad->cpuhp_dead); - free_iova_rcaches(iovad); + if (iovad->rcaches) + iova_domain_free_rcaches(iovad); + rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node) free_iova_mem(iova); } @@ -608,6 +614,7 @@ EXPORT_SYMBOL_GPL(reserve_iova); */ #define IOVA_MAG_SIZE 128 +#define MAX_GLOBAL_MAGS 32 /* magazines per bin */ struct iova_magazine { unsigned long size; @@ -620,6 +627,13 @@ struct iova_cpu_rcache { struct iova_magazine *prev; }; +struct iova_rcache { + spinlock_t lock; + unsigned long depot_size; + struct iova_magazine *depot[MAX_GLOBAL_MAGS]; + struct iova_cpu_rcache __percpu *cpu_rcaches; +}; + static struct iova_magazine *iova_magazine_alloc(gfp_t flags) { return kzalloc(sizeof(struct iova_magazine), flags); @@ -693,28 +707,54 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn) mag->pfns[mag->size++] = pfn; } -static void init_iova_rcaches(struct iova_domain *iovad) +int iova_domain_init_rcaches(struct iova_domain *iovad) { - struct iova_cpu_rcache *cpu_rcache; - struct iova_rcache *rcache; unsigned int cpu; - int i; + int i, ret; + + iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE, + sizeof(struct iova_rcache), + GFP_KERNEL); + if (!iovad->rcaches) + return -ENOMEM; for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { + struct iova_cpu_rcache *cpu_rcache; + struct iova_rcache *rcache; + rcache = &iovad->rcaches[i]; spin_lock_init(&rcache->lock); rcache->depot_size = 0; - rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size()); - if (WARN_ON(!rcache->cpu_rcaches)) - continue; + rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), + cache_line_size()); + if (!rcache->cpu_rcaches) { + ret = -ENOMEM; + goto out_err; + } for_each_possible_cpu(cpu) { cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); + spin_lock_init(&cpu_rcache->lock); cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL); cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL); + if (!cpu_rcache->loaded || !cpu_rcache->prev) { + ret = -ENOMEM; + goto out_err; + } } } + + ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, + &iovad->cpuhp_dead); + if (ret) + goto out_err; + return 0; + +out_err: + free_iova_rcaches(iovad); + return ret; } +EXPORT_SYMBOL_GPL(iova_domain_init_rcaches); /* * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and @@ -831,7 +871,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad, { unsigned int log_size = order_base_2(size); - if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE) + if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches) return 0; return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size); @@ -849,6 +889,8 @@ static void free_iova_rcaches(struct iova_domain *iovad) for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { rcache = &iovad->rcaches[i]; + if (!rcache->cpu_rcaches) + break; for_each_possible_cpu(cpu) { cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); iova_magazine_free(cpu_rcache->loaded); @@ -858,6 +900,9 @@ static void free_iova_rcaches(struct iova_domain *iovad) for (j = 0; j < rcache->depot_size; ++j) iova_magazine_free(rcache->depot[j]); } + + kfree(iovad->rcaches); + iovad->rcaches = NULL; } /* diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c index 2b1143f11d8f..22f7d43f8a68 100644 --- a/drivers/vdpa/vdpa_user/iova_domain.c +++ 
b/drivers/vdpa/vdpa_user/iova_domain.c @@ -480,6 +480,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size) struct file *file; struct vduse_bounce_map *map; unsigned long pfn, bounce_pfns; + int ret; bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT; if (iova_limit <= bounce_size) @@ -513,10 +514,20 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size) spin_lock_init(&domain->iotlb_lock); init_iova_domain(&domain->stream_iovad, PAGE_SIZE, IOVA_START_PFN); + ret = iova_domain_init_rcaches(&domain->stream_iovad); + if (ret) + goto err_iovad_stream; init_iova_domain(&domain->consistent_iovad, PAGE_SIZE, bounce_pfns); + ret = iova_domain_init_rcaches(&domain->consistent_iovad); + if (ret) + goto err_iovad_consistent; return domain; +err_iovad_consistent: + put_iova_domain(&domain->stream_iovad); +err_iovad_stream: + fput(file); err_file: vfree(domain->bounce_maps); err_map: diff --git a/include/linux/iova.h b/include/linux/iova.h index cea79cb9f26c..320a70e40233 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -21,18 +21,8 @@ struct iova { unsigned long pfn_lo; /* Lowest allocated pfn */ }; -struct iova_magazine; -struct iova_cpu_rcache; -#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ -#define MAX_GLOBAL_MAGS 32 /* magazines per bin */ - -struct iova_rcache { - spinlock_t lock; - unsigned long depot_size; - struct iova_magazine *depot[MAX_GLOBAL_MAGS]; - struct iova_cpu_rcache __percpu *cpu_rcaches; -}; +struct iova_rcache; /* holds all the iova translations for a domain */ struct iova_domain { @@ -46,7 +36,7 @@ struct iova_domain { unsigned long max32_alloc_size; /* Size of last failed allocation */ struct iova anchor; /* rbtree lookup anchor */ - struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ + struct iova_rcache *rcaches; struct hlist_node cpuhp_dead; }; @@ -102,6 +92,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); +int iova_domain_init_rcaches(struct iova_domain *iovad); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); #else -- cgit v1.2.3-71-gd317 From b2638e56c2ced2ca258d22f939c47327b189e00c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 3 Feb 2022 14:56:13 +0200 Subject: device property: Don't split fwnode_get_irq*() APIs in the code New fwnode_get_irq_byname() landed after an unrelated function by ordering. Move fwnode_iomap(), so fwnode_get_irq*() APIs will go together. No functional change intended. Signed-off-by: Andy Shevchenko Reviewed-by: Sakari Ailus Signed-off-by: Rafael J. Wysocki --- drivers/base/property.c | 32 ++++++++++++++++---------------- include/linux/property.h | 6 +++--- 2 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/property.c b/drivers/base/property.c index fc59e0f7f9cc..c0e94cce9c29 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -895,6 +895,22 @@ int device_get_phy_mode(struct device *dev) } EXPORT_SYMBOL_GPL(device_get_phy_mode); +/** + * fwnode_iomap - Maps the memory mapped IO for a given fwnode + * @fwnode: Pointer to the firmware node + * @index: Index of the IO range + * + * Returns a pointer to the mapped memory. 
+ */ +void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index) +{ + if (IS_ENABLED(CONFIG_OF_ADDRESS) && is_of_node(fwnode)) + return of_iomap(to_of_node(fwnode), index); + + return NULL; +} +EXPORT_SYMBOL(fwnode_iomap); + /** * fwnode_irq_get - Get IRQ directly from a fwnode * @fwnode: Pointer to the firmware node @@ -919,22 +935,6 @@ int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index) } EXPORT_SYMBOL(fwnode_irq_get); -/** - * fwnode_iomap - Maps the memory mapped IO for a given fwnode - * @fwnode: Pointer to the firmware node - * @index: Index of the IO range - * - * Returns a pointer to the mapped memory. - */ -void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index) -{ - if (IS_ENABLED(CONFIG_OF_ADDRESS) && is_of_node(fwnode)) - return of_iomap(to_of_node(fwnode), index); - - return NULL; -} -EXPORT_SYMBOL(fwnode_iomap); - /** * fwnode_irq_get_byname - Get IRQ from a fwnode using its name * @fwnode: Pointer to the firmware node diff --git a/include/linux/property.h b/include/linux/property.h index 95d56a562b6a..4cd4b326941f 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -123,8 +123,6 @@ void fwnode_handle_put(struct fwnode_handle *fwnode); int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name); -void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index); - unsigned int device_get_child_node_count(struct device *dev); static inline bool device_property_read_bool(struct device *dev, @@ -388,8 +386,10 @@ enum dev_dma_attr device_get_dma_attr(struct device *dev); const void *device_get_match_data(struct device *dev); int device_get_phy_mode(struct device *dev); - int fwnode_get_phy_mode(struct fwnode_handle *fwnode); + +void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index); + struct fwnode_handle *fwnode_graph_get_next_endpoint( const struct fwnode_handle *fwnode, struct fwnode_handle *prev); struct fwnode_handle * -- cgit v1.2.3-71-gd317 From 7a853c2d5951419fdf3c1c9d2b6f5a38f6a6857d Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Mon, 7 Feb 2022 15:02:45 -0800 Subject: mm: Change CONFIG option for mm->pasid field This currently depends on CONFIG_IOMMU_SUPPORT. But it is only needed when CONFIG_IOMMU_SVA option is enabled. Change the CONFIG guards around definition and initialization of mm->pasid field. 
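As a rough sketch of the guard this change implies for code outside the IOMMU core (the accessor below is hypothetical, not part of this patch):

	#include <linux/mm_types.h>	/* struct mm_struct */

	/* Hypothetical accessor: the pasid field only exists under CONFIG_IOMMU_SVA. */
	static inline u32 example_mm_pasid(struct mm_struct *mm)
	{
	#ifdef CONFIG_IOMMU_SVA
		return mm->pasid;
	#else
		return 0;	/* no SVA support built in, hence no PASID field */
	#endif
	}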
Suggested-by: Jacob Pan Signed-off-by: Fenghua Yu Signed-off-by: Borislav Petkov Reviewed-by: Tony Luck Reviewed-by: Thomas Gleixner Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/20220207230254.3342514-3-fenghua.yu@intel.com --- include/linux/mm_types.h | 2 +- kernel/fork.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5140e5feb486..c5cbfd7915ad 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -631,7 +631,7 @@ struct mm_struct { #endif struct work_struct async_put_work; -#ifdef CONFIG_IOMMU_SUPPORT +#ifdef CONFIG_IOMMU_SVA u32 pasid; #endif } __randomize_layout; diff --git a/kernel/fork.c b/kernel/fork.c index d75a528f7b21..6ee7551d3bd2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1021,7 +1021,7 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) static void mm_init_pasid(struct mm_struct *mm) { -#ifdef CONFIG_IOMMU_SUPPORT +#ifdef CONFIG_IOMMU_SVA mm->pasid = INIT_PASID; #endif } -- cgit v1.2.3-71-gd317 From 150154aae4311e7e6458903baecdc8fffe981ed3 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Wed, 1 Dec 2021 10:20:53 +0100 Subject: rcu: Fix description of kvfree_rcu() The kvfree_rcu() header comment's description of the "ptr" parameter is unclear, therefore rephrase it to make it clear that it is a pointer to the memory to eventually be passed to kvfree(). Reported-by: Steven Rostedt Signed-off-by: Uladzislau Rezki (Sony) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 88b42eb46406..9d7df8d36af0 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -924,7 +924,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * * kvfree_rcu(ptr); * - * where @ptr is a pointer to kvfree(). * + * where @ptr is the pointer to be freed by kvfree(). * * Please note, head-less way of freeing is permitted to * use from a context that has to follow might_sleep() -- cgit v1.2.3-71-gd317 From 58d4292bd037b01fbb940a5170817f7d40caa9d5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 14 Jan 2022 16:07:28 -0800 Subject: rcu: Uninline multi-use function: finish_rcuwait() This is a rarely used function, so uninlining its 3 instructions is probably a win or a wash - but the main motivation is to make <linux/rcuwait.h> independent of task_struct details. Signed-off-by: Ingo Molnar Signed-off-by: Paul E.
McKenney --- include/linux/rcuwait.h | 6 +----- kernel/rcu/update.c | 7 +++++++ 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 61c56cca95c4..8052d34da782 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -47,11 +47,7 @@ static inline void prepare_to_rcuwait(struct rcuwait *w) rcu_assign_pointer(w->task, current); } -static inline void finish_rcuwait(struct rcuwait *w) -{ - rcu_assign_pointer(w->task, NULL); - __set_current_state(TASK_RUNNING); -} +extern void finish_rcuwait(struct rcuwait *w); #define rcuwait_wait_event(w, condition, state) \ ({ \ diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 156892c22bb5..180ff9c41fa8 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -407,6 +407,13 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, } EXPORT_SYMBOL_GPL(__wait_rcu_gp); +void finish_rcuwait(struct rcuwait *w) +{ + rcu_assign_pointer(w->task, NULL); + __set_current_state(TASK_RUNNING); +} +EXPORT_SYMBOL_GPL(finish_rcuwait); + #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD void init_rcu_head(struct rcu_head *head) { -- cgit v1.2.3-71-gd317 From e6339d3b443c436c3b8f45eefec2212a8c07065d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 14 Jan 2022 16:16:55 -0800 Subject: rcu: Remove __read_mostly annotations from rcu_scheduler_active externs Remove the __read_mostly attributes from the rcu_scheduler_active extern declarations, because these attributes are ignored for prototypes and we'd have to include the full <linux/cache.h> header just to have this functionally pointless attribute defined. Signed-off-by: Ingo Molnar Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 2 +- include/linux/rcutree.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 9d7df8d36af0..e7c39c200e2b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -84,7 +84,7 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); -extern int rcu_scheduler_active __read_mostly; +extern int rcu_scheduler_active; void rcu_sched_clock_irq(int user); void rcu_report_dead(unsigned int cpu); void rcutree_migrate_callbacks(int cpu); diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 53209d669400..76665db179fa 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -62,7 +62,7 @@ static inline void rcu_irq_exit_check_preempt(void) { } void exit_rcu(void); void rcu_scheduler_starting(void); -extern int rcu_scheduler_active __read_mostly; +extern int rcu_scheduler_active; void rcu_end_inkernel_boot(void); bool rcu_inkernel_boot_has_ended(void); bool rcu_is_watching(void); -- cgit v1.2.3-71-gd317 From 7a5fbc9bcba5325a45297a4ba00091f39a63a1ed Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Mon, 7 Feb 2022 15:02:46 -0800 Subject: iommu/ioasid: Introduce a helper to check for valid PASIDs Define a pasid_valid() helper to check if a given PASID is valid. [ bp: Massage commit message.
] Suggested-by: Ashok Raj Suggested-by: Jacob Pan Signed-off-by: Fenghua Yu Signed-off-by: Borislav Petkov Reviewed-by: Tony Luck Reviewed-by: Thomas Gleixner Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/20220207230254.3342514-4-fenghua.yu@intel.com --- include/linux/ioasid.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h index e9dacd4b9f6b..2237f64dbaae 100644 --- a/include/linux/ioasid.h +++ b/include/linux/ioasid.h @@ -41,6 +41,10 @@ void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, int ioasid_register_allocator(struct ioasid_allocator_ops *allocator); void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator); int ioasid_set_data(ioasid_t ioasid, void *data); +static inline bool pasid_valid(ioasid_t ioasid) +{ + return ioasid != INVALID_IOASID; +} #else /* !CONFIG_IOASID */ static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, @@ -78,5 +82,10 @@ static inline int ioasid_set_data(ioasid_t ioasid, void *data) return -ENOTSUPP; } +static inline bool pasid_valid(ioasid_t ioasid) +{ + return false; +} + #endif /* CONFIG_IOASID */ #endif /* __LINUX_IOASID_H */ -- cgit v1.2.3-71-gd317 From a6cbd44093ef305b02ad5f80ed54abf0148a696c Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Mon, 7 Feb 2022 15:02:47 -0800 Subject: kernel/fork: Initialize mm's PASID A new mm doesn't have a PASID yet when it's created. Initialize the mm's PASID on fork() or for init_mm to INVALID_IOASID (-1). INIT_PASID (0) is reserved for the kernel legacy DMA PASID. It cannot be allocated to a user process. Initializing the process's PASID to 0 may cause confusion about why the process appears to use the reserved kernel legacy DMA PASID. Initializing the PASID to INVALID_IOASID (-1) explicitly indicates that the process doesn't have a valid PASID yet. Even though the only user of mm_pasid_init() is in fork.c, define it in <linux/sched/mm.h> as the first of three mm/pasid life cycle functions (init/set/drop) to keep these all together.
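A condensed sketch of the resulting convention, assuming the INIT_PASID (0) and INVALID_IOASID (-1) definitions from <linux/ioasid.h> (the function below is illustrative only, not part of this patch):

	#include <linux/ioasid.h>
	#include <linux/sched/mm.h>

	static void example_new_mm(struct mm_struct *mm)
	{
		mm_pasid_init(mm);	/* mm->pasid = INVALID_IOASID: no PASID yet */

		/* INIT_PASID (0) stays reserved for kernel legacy DMA and must
		 * never show up on a freshly initialized user mm. */
		WARN_ON(mm->pasid == INIT_PASID);
	}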
Suggested-by: Dave Hansen Signed-off-by: Fenghua Yu Signed-off-by: Borislav Petkov Reviewed-by: Tony Luck Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20220207230254.3342514-5-fenghua.yu@intel.com --- include/linux/sched/mm.h | 10 ++++++++++ kernel/fork.c | 10 ++-------- mm/init-mm.c | 4 ++++ 3 files changed, 16 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index aa5f09ca5bcf..c74d1edbac2f 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -8,6 +8,7 @@ #include #include #include +#include <linux/ioasid.h> /* * Routines for handling mm_structs @@ -433,4 +434,13 @@ static inline void membarrier_update_current_mm(struct mm_struct *next_mm) } #endif +#ifdef CONFIG_IOMMU_SVA +static inline void mm_pasid_init(struct mm_struct *mm) +{ + mm->pasid = INVALID_IOASID; +} +#else +static inline void mm_pasid_init(struct mm_struct *mm) {} +#endif + #endif /* _LINUX_SCHED_MM_H */ diff --git a/kernel/fork.c b/kernel/fork.c index 6ee7551d3bd2..deacd2c17a7f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -97,6 +97,7 @@ #include #include #include +#include #include #include @@ -1019,13 +1020,6 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) #endif } -static void mm_init_pasid(struct mm_struct *mm) -{ -#ifdef CONFIG_IOMMU_SVA - mm->pasid = INIT_PASID; -#endif -} - static void mm_init_uprobes_state(struct mm_struct *mm) { #ifdef CONFIG_UPROBES @@ -1054,7 +1048,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm_init_cpumask(mm); mm_init_aio(mm); mm_init_owner(mm, p); - mm_init_pasid(mm); + mm_pasid_init(mm); RCU_INIT_POINTER(mm->exe_file, NULL); mmu_notifier_subscriptions_init(mm); init_tlb_flush_pending(mm); diff --git a/mm/init-mm.c b/mm/init-mm.c index b4a6f38fb51d..fbe7844d0912 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -10,6 +10,7 @@ #include #include +#include <linux/ioasid.h> #include #ifndef INIT_MM_CONTEXT @@ -38,6 +39,9 @@ struct mm_struct init_mm = { .mmlist = LIST_HEAD_INIT(init_mm.mmlist), .user_ns = &init_user_ns, .cpu_bitmap = CPU_BITS_NONE, +#ifdef CONFIG_IOMMU_SVA + .pasid = INVALID_IOASID, +#endif INIT_MM_CONTEXT(init_mm) }; -- cgit v1.2.3-71-gd317 From 8cb37a5974a48569aab8a1736d21399fddbdbdb2 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 31 Jan 2022 10:05:20 +0100 Subject: stack: Introduce CONFIG_RANDOMIZE_KSTACK_OFFSET The randomize_kstack_offset feature is unconditionally compiled in when the architecture supports it. To add constraints on compiler versions, we require a dedicated Kconfig variable. Therefore, introduce RANDOMIZE_KSTACK_OFFSET. Furthermore, this option is now also configurable by EXPERT kernels: while the feature is supposed to have zero performance overhead when disabled, due to its use of static branches, there are a few cases where giving a distribution the option to disable the feature entirely makes sense. For example, very resource constrained environments would never enable the feature to begin with; for those, the additional kernel code size increase would be redundant.
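For orientation, a simplified sketch of the pattern this option controls on a syscall path (loosely modeled on the x86 entry code; the function name and entropy source here are illustrative, not taken from this patch):

	#include <linux/randomize_kstack.h>
	#include <linux/random.h>	/* get_random_u32(), entropy source is illustrative */

	/* Both macros compile to nothing when RANDOMIZE_KSTACK_OFFSET is unset,
	 * and to a static-branch-guarded alloca when it is set. */
	static void example_syscall_entry(void)
	{
		add_random_kstack_offset();	/* consume the previously chosen offset */

		/* ... dispatch the system call ... */

		choose_random_kstack_offset(get_random_u32());	/* pick one for next entry */
	}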
Signed-off-by: Marco Elver Reviewed-by: Nathan Chancellor Acked-by: Peter Zijlstra (Intel) Acked-by: Kees Cook Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220131090521.1947110-1-elver@google.com --- arch/Kconfig | 23 ++++++++++++++++++----- include/linux/randomize_kstack.h | 5 +++++ init/main.c | 2 +- 3 files changed, 24 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 678a80713b21..2cde48d9b77c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1159,16 +1159,29 @@ config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET to the compiler, so it will attempt to add canary checks regardless of the static branch state. -config RANDOMIZE_KSTACK_OFFSET_DEFAULT - bool "Randomize kernel stack offset on syscall entry" +config RANDOMIZE_KSTACK_OFFSET + bool "Support for randomizing kernel stack offset on syscall entry" if EXPERT + default y depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET help The kernel stack offset can be randomized (after pt_regs) by roughly 5 bits of entropy, frustrating memory corruption attacks that depend on stack address determinism or - cross-syscall address exposures. This feature is controlled - by kernel boot param "randomize_kstack_offset=on/off", and this - config chooses the default boot state. + cross-syscall address exposures. + + The feature is controlled via the "randomize_kstack_offset=on/off" + kernel boot param, and if turned off has zero overhead due to its use + of static branches (see JUMP_LABEL). + + If unsure, say Y. + +config RANDOMIZE_KSTACK_OFFSET_DEFAULT + bool "Default state of kernel stack offset randomization" + depends on RANDOMIZE_KSTACK_OFFSET + help + Kernel stack offset randomization is controlled by kernel boot param + "randomize_kstack_offset=on/off", and this config chooses the default + boot state. config ARCH_OPTIONAL_KERNEL_RWX def_bool n diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h index bebc911161b6..91f1b990a3c3 100644 --- a/include/linux/randomize_kstack.h +++ b/include/linux/randomize_kstack.h @@ -2,6 +2,7 @@ #ifndef _LINUX_RANDOMIZE_KSTACK_H #define _LINUX_RANDOMIZE_KSTACK_H +#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET #include #include #include @@ -50,5 +51,9 @@ void *__builtin_alloca(size_t size); raw_cpu_write(kstack_offset, offset); \ } \ } while (0) +#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */ +#define add_random_kstack_offset() do { } while (0) +#define choose_random_kstack_offset(rand) do { } while (0) +#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */ #endif diff --git a/init/main.c b/init/main.c index 65fa2e41a9c0..560f45c27ffe 100644 --- a/init/main.c +++ b/init/main.c @@ -853,7 +853,7 @@ static void __init mm_init(void) pti_init(); } -#ifdef CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET +#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, randomize_kstack_offset); DEFINE_PER_CPU(u32, kstack_offset); -- cgit v1.2.3-71-gd317 From efa90c11f62e6b7252fb75efe2787056872a627c Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 31 Jan 2022 10:05:21 +0100 Subject: stack: Constrain and fix stack offset randomization with Clang builds All supported versions of Clang perform auto-init of __builtin_alloca() when stack auto-init is on (CONFIG_INIT_STACK_ALL_{ZERO,PATTERN}). add_random_kstack_offset() uses __builtin_alloca() to add a stack offset. This means, when CONFIG_INIT_STACK_ALL_{ZERO,PATTERN} is enabled, add_random_kstack_offset() will auto-init that unused portion of the stack used to add an offset. 
There are several problems with this: 1. These offsets can be as large as 1023 bytes. Performing memset() on them isn't exactly cheap, and this is done on every syscall entry. 2. Architectures adding add_random_kstack_offset() to syscall entry implemented in C require them to be 'noinstr' (e.g. see x86 and s390). The potential problem here is that a call to memset may occur, which is not noinstr. A x86_64 defconfig kernel with Clang 11 and CONFIG_VMLINUX_VALIDATION shows: | vmlinux.o: warning: objtool: do_syscall_64()+0x9d: call to memset() leaves .noinstr.text section | vmlinux.o: warning: objtool: do_int80_syscall_32()+0xab: call to memset() leaves .noinstr.text section | vmlinux.o: warning: objtool: __do_fast_syscall_32()+0xe2: call to memset() leaves .noinstr.text section | vmlinux.o: warning: objtool: fixup_bad_iret()+0x2f: call to memset() leaves .noinstr.text section Clang 14 (unreleased) will introduce a way to skip alloca initialization via __builtin_alloca_uninitialized() (https://reviews.llvm.org/D115440). Constrain RANDOMIZE_KSTACK_OFFSET to only be enabled if no stack auto-init is enabled, the compiler is GCC, or Clang is version 14+. Use __builtin_alloca_uninitialized() if the compiler provides it, as is done by Clang 14. Link: https://lkml.kernel.org/r/YbHTKUjEejZCLyhX@elver.google.com Fixes: 39218ff4c625 ("stack: Optionally randomize kernel stack offset each syscall") Signed-off-by: Marco Elver Reviewed-by: Nathan Chancellor Acked-by: Kees Cook Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220131090521.1947110-2-elver@google.com --- arch/Kconfig | 1 + include/linux/randomize_kstack.h | 16 ++++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 2cde48d9b77c..c5b50bfe31c1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1163,6 +1163,7 @@ config RANDOMIZE_KSTACK_OFFSET bool "Support for randomizing kernel stack offset on syscall entry" if EXPERT default y depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET + depends on INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION >= 140000 help The kernel stack offset can be randomized (after pt_regs) by roughly 5 bits of entropy, frustrating memory corruption diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h index 91f1b990a3c3..1468caf001c0 100644 --- a/include/linux/randomize_kstack.h +++ b/include/linux/randomize_kstack.h @@ -17,8 +17,20 @@ DECLARE_PER_CPU(u32, kstack_offset); * alignment. Also, since this use is being explicitly masked to a max of * 10 bits, stack-clash style attacks are unlikely. For more details see * "VLAs" in Documentation/process/deprecated.rst + * + * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently + * only with Clang and not GCC). Initializing the unused area on each syscall + * entry is expensive, and generating an implicit call to memset() may also be + * problematic (such as in noinstr functions). Therefore, if the compiler + * supports it (which it should if it initializes allocas), always use the + * "uninitialized" variant of the builtin. */ -void *__builtin_alloca(size_t size); +#if __has_builtin(__builtin_alloca_uninitialized) +#define __kstack_alloca __builtin_alloca_uninitialized +#else +#define __kstack_alloca __builtin_alloca +#endif + /* * Use, at most, 10 bits of entropy. We explicitly cap this to keep the * "VLA" from being unbounded (see above). 
10 bits leaves enough room for @@ -37,7 +49,7 @@ void *__builtin_alloca(size_t size); if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \ &randomize_kstack_offset)) { \ u32 offset = raw_cpu_read(kstack_offset); \ - u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset)); \ + u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \ /* Keep allocation even after "ptr" loses scope. */ \ asm volatile("" :: "r"(ptr) : "memory"); \ } \ -- cgit v1.2.3-71-gd317 From 481153991c410e322383490527c20b0c41dbf652 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 10 Feb 2022 22:33:41 +0100 Subject: i2c: don't expose function which is only used internally MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit i2c_setup_smbus_alert() is only needed within the I2C core, so no need to expose it to other modules. Signed-off-by: Wolfram Sang Reviewed-by: Niklas Söderlund Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-smbus.c | 1 - drivers/i2c/i2c-core.h | 9 +++++++++ include/linux/i2c-smbus.h | 8 -------- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index 304c2c8fee68..053b215308c4 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -716,5 +716,4 @@ int i2c_setup_smbus_alert(struct i2c_adapter *adapter) return PTR_ERR_OR_ZERO(i2c_new_smbus_alert_device(adapter, NULL)); } -EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert); #endif diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 8ce261167a2d..87e2c914f1c5 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -86,3 +86,12 @@ void of_i2c_register_devices(struct i2c_adapter *adap); static inline void of_i2c_register_devices(struct i2c_adapter *adap) { } #endif extern struct notifier_block i2c_of_notifier; + +#if IS_ENABLED(CONFIG_I2C_SMBUS) +int i2c_setup_smbus_alert(struct i2c_adapter *adap); +#else +static inline int i2c_setup_smbus_alert(struct i2c_adapter *adap) +{ + return 0; +} +#endif diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 95cf902e0bda..ced1c6ead52a 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -30,14 +30,6 @@ struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter, struct i2c_smbus_alert_setup *setup); int i2c_handle_smbus_alert(struct i2c_client *ara); -#if IS_ENABLED(CONFIG_I2C_SMBUS) -int i2c_setup_smbus_alert(struct i2c_adapter *adap); -#else -static inline int i2c_setup_smbus_alert(struct i2c_adapter *adap) -{ - return 0; -} -#endif #if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_I2C_SLAVE) struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter); void i2c_free_slave_host_notify_device(struct i2c_client *client); -- cgit v1.2.3-71-gd317 From 701fac40384f07197b106136012804c3cae0b3de Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Mon, 7 Feb 2022 15:02:48 -0800 Subject: iommu/sva: Assign a PASID to mm on PASID allocation and free it on mm exit PASIDs are process-wide. It was attempted to use refcounted PASIDs to free them when the last thread drops the refcount. This turned out to be complex and error prone. Given the fact that the PASID space is 20 bits, which allows up to 1M processes to have a PASID associated concurrently, PASID resource exhaustion is not a realistic concern. 
Therefore, it was decided to simplify the approach and stick with lazy on demand PASID allocation, but drop the eager free approach and make an allocated PASID's lifetime bound to the lifetime of the process. Get rid of the refcounting mechanisms and replace/rename the interfaces to reflect this new approach. [ bp: Massage commit message. ] Suggested-by: Dave Hansen Signed-off-by: Fenghua Yu Signed-off-by: Borislav Petkov Reviewed-by: Tony Luck Reviewed-by: Lu Baolu Reviewed-by: Jacob Pan Reviewed-by: Thomas Gleixner Acked-by: Joerg Roedel Link: https://lore.kernel.org/r/20220207230254.3342514-6-fenghua.yu@intel.com --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 5 +--- drivers/iommu/intel/iommu.c | 4 +-- drivers/iommu/intel/svm.c | 9 ------ drivers/iommu/ioasid.c | 39 +++---------------------- drivers/iommu/iommu-sva-lib.c | 39 ++++++++----------------- drivers/iommu/iommu-sva-lib.h | 1 - include/linux/ioasid.h | 12 ++------ include/linux/sched/mm.h | 16 ++++++++++ kernel/fork.c | 1 + 9 files changed, 38 insertions(+), 88 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index a737ba5f727e..22ddd05bbdcd 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -340,14 +340,12 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm) bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm); if (IS_ERR(bond->smmu_mn)) { ret = PTR_ERR(bond->smmu_mn); - goto err_free_pasid; + goto err_free_bond; } list_add(&bond->list, &master->bonds); return &bond->sva; -err_free_pasid: - iommu_sva_free_pasid(mm); err_free_bond: kfree(bond); return ERR_PTR(ret); @@ -377,7 +375,6 @@ void arm_smmu_sva_unbind(struct iommu_sva *handle) if (refcount_dec_and_test(&bond->refs)) { list_del(&bond->list); arm_smmu_mmu_notifier_put(bond->smmu_mn); - iommu_sva_free_pasid(bond->mm); kfree(bond); } mutex_unlock(&sva_lock); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 92fea3fbbb11..ef03b2176bbd 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4781,7 +4781,7 @@ attach_failed: link_failed: spin_unlock_irqrestore(&device_domain_lock, flags); if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); + ioasid_free(domain->default_pasid); return ret; } @@ -4811,7 +4811,7 @@ static void aux_domain_remove_dev(struct dmar_domain *domain, spin_unlock_irqrestore(&device_domain_lock, flags); if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); + ioasid_free(domain->default_pasid); } static int prepare_domain_attach_device(struct iommu_domain *domain, diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 5b5d69b04fcc..51ac2096b3da 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -514,11 +514,6 @@ static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm, return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1); } -static void intel_svm_free_pasid(struct mm_struct *mm) -{ - iommu_sva_free_pasid(mm); -} - static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev, struct mm_struct *mm, @@ -662,8 +657,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid) kfree(svm); } } - /* Drop a PASID reference and free it if no reference. 
*/ - intel_svm_free_pasid(mm); } out: return ret; @@ -1047,8 +1040,6 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void } sva = intel_svm_bind_mm(iommu, dev, mm, flags); - if (IS_ERR_OR_NULL(sva)) - intel_svm_free_pasid(mm); mutex_unlock(&pasid_mutex); return sva; diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c index 06fee7416816..a786c034907c 100644 --- a/drivers/iommu/ioasid.c +++ b/drivers/iommu/ioasid.c @@ -2,7 +2,7 @@ /* * I/O Address Space ID allocator. There is one global IOASID space, split into * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and - * free IOASIDs with ioasid_alloc and ioasid_put. + * free IOASIDs with ioasid_alloc() and ioasid_free(). */ #include #include @@ -15,7 +15,6 @@ struct ioasid_data { struct ioasid_set *set; void *private; struct rcu_head rcu; - refcount_t refs; }; /* @@ -315,7 +314,6 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, data->set = set; data->private = private; - refcount_set(&data->refs, 1); /* * Custom allocator needs allocator data to perform platform specific @@ -348,35 +346,11 @@ exit_free: EXPORT_SYMBOL_GPL(ioasid_alloc); /** - * ioasid_get - obtain a reference to the IOASID - * @ioasid: the ID to get - */ -void ioasid_get(ioasid_t ioasid) -{ - struct ioasid_data *ioasid_data; - - spin_lock(&ioasid_allocator_lock); - ioasid_data = xa_load(&active_allocator->xa, ioasid); - if (ioasid_data) - refcount_inc(&ioasid_data->refs); - else - WARN_ON(1); - spin_unlock(&ioasid_allocator_lock); -} -EXPORT_SYMBOL_GPL(ioasid_get); - -/** - * ioasid_put - Release a reference to an ioasid + * ioasid_free - Free an ioasid * @ioasid: the ID to remove - * - * Put a reference to the IOASID, free it when the number of references drops to - * zero. - * - * Return: %true if the IOASID was freed, %false otherwise. */ -bool ioasid_put(ioasid_t ioasid) +void ioasid_free(ioasid_t ioasid) { - bool free = false; struct ioasid_data *ioasid_data; spin_lock(&ioasid_allocator_lock); @@ -386,10 +360,6 @@ bool ioasid_put(ioasid_t ioasid) goto exit_unlock; } - free = refcount_dec_and_test(&ioasid_data->refs); - if (!free) - goto exit_unlock; - active_allocator->ops->free(ioasid, active_allocator->ops->pdata); /* Custom allocator needs additional steps to free the xa element */ if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) { @@ -399,9 +369,8 @@ bool ioasid_put(ioasid_t ioasid) exit_unlock: spin_unlock(&ioasid_allocator_lock); - return free; } -EXPORT_SYMBOL_GPL(ioasid_put); +EXPORT_SYMBOL_GPL(ioasid_free); /** * ioasid_find - Find IOASID data diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c index bd41405d34e9..106506143896 100644 --- a/drivers/iommu/iommu-sva-lib.c +++ b/drivers/iommu/iommu-sva-lib.c @@ -18,8 +18,7 @@ static DECLARE_IOASID_SET(iommu_sva_pasid); * * Try to allocate a PASID for this mm, or take a reference to the existing one * provided it fits within the [@min, @max] range. On success the PASID is - * available in mm->pasid, and must be released with iommu_sva_free_pasid(). - * @min must be greater than 0, because 0 indicates an unused mm->pasid. + * available in mm->pasid and will be available for the lifetime of the mm. * * Returns 0 on success and < 0 on error. 
*/ @@ -33,38 +32,24 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max) return -EINVAL; mutex_lock(&iommu_sva_lock); - if (mm->pasid) { - if (mm->pasid >= min && mm->pasid <= max) - ioasid_get(mm->pasid); - else + /* Is a PASID already associated with this mm? */ + if (pasid_valid(mm->pasid)) { + if (mm->pasid < min || mm->pasid >= max) ret = -EOVERFLOW; - } else { - pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm); - if (pasid == INVALID_IOASID) - ret = -ENOMEM; - else - mm->pasid = pasid; + goto out; } + + pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm); + if (!pasid_valid(pasid)) + ret = -ENOMEM; + else + mm_pasid_set(mm, pasid); +out: mutex_unlock(&iommu_sva_lock); return ret; } EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid); -/** - * iommu_sva_free_pasid - Release the mm's PASID - * @mm: the mm - * - * Drop one reference to a PASID allocated with iommu_sva_alloc_pasid() - */ -void iommu_sva_free_pasid(struct mm_struct *mm) -{ - mutex_lock(&iommu_sva_lock); - if (ioasid_put(mm->pasid)) - mm->pasid = 0; - mutex_unlock(&iommu_sva_lock); -} -EXPORT_SYMBOL_GPL(iommu_sva_free_pasid); - /* ioasid_find getter() requires a void * argument */ static bool __mmget_not_zero(void *mm) { diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h index 95dc3ebc1928..8909ea1094e3 100644 --- a/drivers/iommu/iommu-sva-lib.h +++ b/drivers/iommu/iommu-sva-lib.h @@ -9,7 +9,6 @@ #include int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max); -void iommu_sva_free_pasid(struct mm_struct *mm); struct mm_struct *iommu_sva_find(ioasid_t pasid); /* I/O Page fault */ diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h index 2237f64dbaae..af1c9d62e642 100644 --- a/include/linux/ioasid.h +++ b/include/linux/ioasid.h @@ -34,8 +34,7 @@ struct ioasid_allocator_ops { #if IS_ENABLED(CONFIG_IOASID) ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, void *private); -void ioasid_get(ioasid_t ioasid); -bool ioasid_put(ioasid_t ioasid); +void ioasid_free(ioasid_t ioasid); void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, bool (*getter)(void *)); int ioasid_register_allocator(struct ioasid_allocator_ops *allocator); @@ -53,14 +52,7 @@ static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, return INVALID_IOASID; } -static inline void ioasid_get(ioasid_t ioasid) -{ -} - -static inline bool ioasid_put(ioasid_t ioasid) -{ - return false; -} +static inline void ioasid_free(ioasid_t ioasid) { } static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, bool (*getter)(void *)) diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index c74d1edbac2f..a80356e9dc69 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -439,8 +439,24 @@ static inline void mm_pasid_init(struct mm_struct *mm) { mm->pasid = INVALID_IOASID; } + +/* Associate a PASID with an mm_struct: */ +static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) +{ + mm->pasid = pasid; +} + +static inline void mm_pasid_drop(struct mm_struct *mm) +{ + if (pasid_valid(mm->pasid)) { + ioasid_free(mm->pasid); + mm->pasid = INVALID_IOASID; + } +} #else static inline void mm_pasid_init(struct mm_struct *mm) {} +static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) {} +static inline void mm_pasid_drop(struct mm_struct *mm) {} #endif #endif /* _LINUX_SCHED_MM_H */ diff --git a/kernel/fork.c b/kernel/fork.c index deacd2c17a7f..c03c6682464c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1115,6 
+1115,7 @@ static inline void __mmput(struct mm_struct *mm) } if (mm->binfmt) module_put(mm->binfmt->module); + mm_pasid_drop(mm); mmdrop(mm); } -- cgit v1.2.3-71-gd317 From a3d29e8291b622780eb6e4e3eeaf2b24ec78fd43 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 7 Feb 2022 15:02:50 -0800 Subject: sched: Define and initialize a flag to identify valid PASID in the task Add a new single bit field to the task structure to track whether this task has initialized the IA32_PASID MSR to the mm's PASID. Initialize the field to zero when creating a new task with fork/clone. Signed-off-by: Peter Zijlstra Co-developed-by: Fenghua Yu Signed-off-by: Fenghua Yu Signed-off-by: Borislav Petkov Reviewed-by: Tony Luck Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20220207230254.3342514-8-fenghua.yu@intel.com --- include/linux/sched.h | 3 +++ kernel/fork.c | 4 ++++ 2 files changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 75ba8aa60248..4e5de3aed410 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -938,6 +938,9 @@ struct task_struct { /* Recursion prevention for eventfd_signal() */ unsigned in_eventfd_signal:1; #endif +#ifdef CONFIG_IOMMU_SVA + unsigned pasid_activated:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. */ diff --git a/kernel/fork.c b/kernel/fork.c index c03c6682464c..51fd1df994b7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -968,6 +968,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->use_memdelay = 0; #endif +#ifdef CONFIG_IOMMU_SVA + tsk->pasid_activated = 0; +#endif + #ifdef CONFIG_MEMCG tsk->active_memcg = NULL; #endif -- cgit v1.2.3-71-gd317 From 45ec846c1cd11835a29c85645065115dd791aa45 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 9 Feb 2022 16:25:58 +0000 Subject: irqdomain: Let irq_domain_set_{info,hwirq_and_chip} take a const irq_chip In order to let a const irqchip be fed to the irqchip layer, adjust the various prototypes. An extra cast in irq_domain_set_hwirq_and_chip() is required to avoid a warning. 
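To illustrate what the const-ified prototypes permit on the driver side, a brief sketch (all names hypothetical; a hierarchical parent domain is assumed):

	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	static const struct irq_chip example_chip = {	/* may now live in rodata */
		.name		= "example",
		.irq_mask	= irq_chip_mask_parent,
		.irq_unmask	= irq_chip_unmask_parent,
		.irq_eoi	= irq_chip_eoi_parent,
	};

	static int example_domain_alloc(struct irq_domain *d, unsigned int virq,
					unsigned int nr_irqs, void *arg)
	{
		irq_hw_number_t hwirq = 0;	/* normally decoded from *arg */

		irq_domain_set_info(d, virq, hwirq, &example_chip, NULL,
				    handle_fasteoi_irq, NULL, NULL);
		return 0;
	}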
Signed-off-by: Marc Zyngier Acked-by: Linus Walleij Link: https://lore.kernel.org/r/20220209162607.1118325-2-maz@kernel.org --- include/linux/irqdomain.h | 5 +++-- kernel/irq/irqdomain.c | 9 +++++---- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index be25a33293e5..00d577f90883 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -479,7 +479,8 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq); extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hwirq, struct irq_chip *chip, + irq_hw_number_t hwirq, + const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name); extern void irq_domain_reset_irq_data(struct irq_data *irq_data); @@ -522,7 +523,7 @@ extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, - struct irq_chip *chip, + const struct irq_chip *chip, void *chip_data); extern void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index bf38c546aa25..d5ce96510549 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -1319,7 +1319,8 @@ EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); * @chip_data: The associated chip data */ int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hwirq, struct irq_chip *chip, + irq_hw_number_t hwirq, + const struct irq_chip *chip, void *chip_data) { struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); @@ -1328,7 +1329,7 @@ int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, return -ENOENT; irq_data->hwirq = hwirq; - irq_data->chip = chip ? chip : &no_irq_chip; + irq_data->chip = (struct irq_chip *)(chip ? chip : &no_irq_chip); irq_data->chip_data = chip_data; return 0; @@ -1347,7 +1348,7 @@ EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip); * @handler_name: The interrupt handler name */ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hwirq, struct irq_chip *chip, + irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name) { @@ -1853,7 +1854,7 @@ EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); * @handler_name: The interrupt handler name */ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hwirq, struct irq_chip *chip, + irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name) { -- cgit v1.2.3-71-gd317 From 393e1280f765661cf39785e967676a4e57324126 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 9 Feb 2022 16:25:59 +0000 Subject: genirq: Allow irq_chip registration functions to take a const irq_chip In order to let a const irqchip be fed to the irqchip layer, adjust the various prototypes. An extra cast in irq_set_chip() is required to avoid a warning.
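Likewise for the registration path, a minimal sketch of a map() callback reusing the const chip from the previous sketch (still hypothetical driver code):

	static int example_map(struct irq_domain *d, unsigned int irq,
			       irq_hw_number_t hwirq)
	{
		/* The chip pointer may now reference rodata. */
		irq_set_chip_and_handler(irq, &example_chip, handle_level_irq);
		irq_set_chip_data(irq, d->host_data);
		return 0;
	}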
Signed-off-by: Marc Zyngier Acked-by: Linus Walleij Link: https://lore.kernel.org/r/20220209162607.1118325-3-maz@kernel.org --- include/linux/irq.h | 7 ++++--- kernel/irq/chip.c | 9 +++------ 2 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index 2cb2e2ac2703..f92788ccdba2 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -710,10 +710,11 @@ extern struct irq_chip no_irq_chip; extern struct irq_chip dummy_irq_chip; extern void -irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, +irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, irq_flow_handler_t handle, const char *name); -static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, +static inline void irq_set_chip_and_handler(unsigned int irq, + const struct irq_chip *chip, irq_flow_handler_t handle) { irq_set_chip_and_handler_name(irq, chip, handle, NULL); @@ -803,7 +804,7 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq) } /* Set/get chip/data for an IRQ: */ -extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); +extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip); extern int irq_set_handler_data(unsigned int irq, void *data); extern int irq_set_chip_data(unsigned int irq, void *data); extern int irq_set_irq_type(unsigned int irq, unsigned int type); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 24b6f2b40e5e..54af0deb239b 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -38,7 +38,7 @@ struct irqaction chained_action = { * @irq: irq number * @chip: pointer to irq chip description structure */ -int irq_set_chip(unsigned int irq, struct irq_chip *chip) +int irq_set_chip(unsigned int irq, const struct irq_chip *chip) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); @@ -46,10 +46,7 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip) if (!desc) return -EINVAL; - if (!chip) - chip = &no_irq_chip; - - desc->irq_data.chip = chip; + desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); irq_put_desc_unlock(desc, flags); /* * For !CONFIG_SPARSE_IRQ make the irq show up in @@ -1073,7 +1070,7 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); void -irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, +irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, irq_flow_handler_t handle, const char *name) { irq_set_chip(irq, chip); -- cgit v1.2.3-71-gd317 From 3fb212a042fbd8eccbb2af1852e03ed7757b9600 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 9 Feb 2022 16:26:04 +0000 Subject: irqchip/versatile-fpga: Switch to dynamic chip name output Move the name output to the relevant callback, which allows us some nice cleanups (mostly owing to the fact that the driver is now DT only). We also drop a random include directive from the ftintc010 driver.
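The irq_print_chip technique this conversion relies on, shown in isolation (a sketch with a hypothetical per-instance structure; note the explicit "%s", which avoids feeding the node name to seq_printf() as a format string):

	static void example_irq_print_chip(struct irq_data *d, struct seq_file *p)
	{
		struct example_irq_data *f = irq_data_get_irq_chip_data(d);

		/* The name is computed per instance at print time instead of
		 * being a fixed chip.name string. */
		seq_printf(p, "%s", irq_domain_get_of_node(f->domain)->name);
	}

	static const struct irq_chip example_print_chip = {
		.irq_mask	= example_mask,		/* hypothetical callbacks */
		.irq_unmask	= example_unmask,
		.irq_print_chip	= example_irq_print_chip,
	};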
Signed-off-by: Marc Zyngier Reviewed-by: Linus Walleij Acked-by: Linus Walleij Link: https://lore.kernel.org/r/20220209162607.1118325-8-maz@kernel.org --- drivers/irqchip/irq-ftintc010.c | 1 - drivers/irqchip/irq-versatile-fpga.c | 46 +++++++++++++++++++--------------- include/linux/irqchip/versatile-fpga.h | 14 ----------- 3 files changed, 26 insertions(+), 35 deletions(-) delete mode 100644 include/linux/irqchip/versatile-fpga.h (limited to 'include/linux') diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c index 5cc268880f8e..46a3aa60e50e 100644 --- a/drivers/irqchip/irq-ftintc010.c +++ b/drivers/irqchip/irq-ftintc010.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index f2757b6aecc8..ba543ed9c154 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -7,12 +7,12 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include @@ -34,14 +34,12 @@ /** * struct fpga_irq_data - irq data container for the FPGA IRQ controller * @base: memory offset in virtual memory - * @chip: chip container for this instance * @domain: IRQ domain for this instance * @valid: mask for valid IRQs on this controller * @used_irqs: number of active IRQs on this controller */ struct fpga_irq_data { void __iomem *base; - struct irq_chip chip; u32 valid; struct irq_domain *domain; u8 used_irqs; @@ -67,6 +65,20 @@ static void fpga_irq_unmask(struct irq_data *d) writel(mask, f->base + IRQ_ENABLE_SET); } +static void fpga_irq_print_chip(struct irq_data *d, struct seq_file *p) +{ + struct fpga_irq_data *f = irq_data_get_irq_chip_data(d); + + seq_printf(p, irq_domain_get_of_node(f->domain)->name); +} + +static const struct irq_chip fpga_chip = { + .irq_ack = fpga_irq_mask, + .irq_mask = fpga_irq_mask, + .irq_unmask = fpga_irq_unmask, + .irq_print_chip = fpga_irq_print_chip, +}; + static void fpga_irq_handle(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); @@ -116,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs) * Keep iterating over all registered FPGA IRQ controllers until there are * no pending interrupts. 
*/ -asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) +static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) { int i, handled; @@ -135,8 +147,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq, if (!(f->valid & BIT(hwirq))) return -EPERM; irq_set_chip_data(irq, f); - irq_set_chip_and_handler(irq, &f->chip, - handle_level_irq); + irq_set_chip_and_handler(irq, &fpga_chip, handle_level_irq); irq_set_probe(irq); return 0; } @@ -146,8 +157,8 @@ static const struct irq_domain_ops fpga_irqdomain_ops = { .xlate = irq_domain_xlate_onetwocell, }; -void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, - int parent_irq, u32 valid, struct device_node *node) +static void __init fpga_irq_init(void __iomem *base, int parent_irq, + u32 valid, struct device_node *node) { struct fpga_irq_data *f; int i; @@ -158,10 +169,6 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, } f = &fpga_irq_devices[fpga_irq_id]; f->base = base; - f->chip.name = name; - f->chip.irq_ack = fpga_irq_mask; - f->chip.irq_mask = fpga_irq_mask; - f->chip.irq_unmask = fpga_irq_unmask; f->valid = valid; if (parent_irq != -1) { @@ -169,20 +176,19 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, f); } - /* This will also allocate irq descriptors */ - f->domain = irq_domain_add_simple(node, fls(valid), irq_start, + f->domain = irq_domain_add_linear(node, fls(valid), &fpga_irqdomain_ops, f); /* This will allocate all valid descriptors in the linear case */ for (i = 0; i < fls(valid); i++) if (valid & BIT(i)) { - if (!irq_start) - irq_create_mapping(f->domain, i); + /* Is this still required? */ + irq_create_mapping(f->domain, i); f->used_irqs++; } pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs", - fpga_irq_id, name, base, f->used_irqs); + fpga_irq_id, node->name, base, f->used_irqs); if (parent_irq != -1) pr_cont(", parent IRQ: %d\n", parent_irq); else @@ -192,8 +198,8 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, } #ifdef CONFIG_OF -int __init fpga_irq_of_init(struct device_node *node, - struct device_node *parent) +static int __init fpga_irq_of_init(struct device_node *node, + struct device_node *parent) { void __iomem *base; u32 clear_mask; @@ -222,7 +228,7 @@ int __init fpga_irq_of_init(struct device_node *node, parent_irq = -1; } - fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); + fpga_irq_init(base, parent_irq, valid_mask, node); /* * On Versatile AB/PB, some secondary interrupts have a direct diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h deleted file mode 100644 index a978fc8c7996..000000000000 --- a/include/linux/irqchip/versatile-fpga.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef PLAT_FPGA_IRQ_H -#define PLAT_FPGA_IRQ_H - -struct device_node; -struct pt_regs; - -void fpga_handle_irq(struct pt_regs *regs); -void fpga_irq_init(void __iomem *, const char *, int, int, u32, - struct device_node *node); -int fpga_irq_of_init(struct device_node *node, - struct device_node *parent); - -#endif -- cgit v1.2.3-71-gd317 From 5e50f5d4ff31e95599d695df1f0a4e7d2d6fef99 Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Sat, 12 Feb 2022 18:59:21 +0100 Subject: security: add sctp_assoc_established hook security_sctp_assoc_established() is added to replace security_inet_conn_established() called in sctp_sf_do_5_1E_ca(), so that asoc can be accessed in 
security subsystem and save the peer secid to asoc->peer_secid. Fixes: 72e89f50084c ("security: Add support for SCTP security hooks") Reported-by: Prashanth Prahlad Based-on-patch-by: Xin Long Reviewed-by: Xin Long Tested-by: Richard Haines Signed-off-by: Ondrej Mosnacek Signed-off-by: Paul Moore --- Documentation/security/SCTP.rst | 22 ++++++++++------------ include/linux/lsm_hook_defs.h | 2 ++ include/linux/lsm_hooks.h | 5 +++++ include/linux/security.h | 8 ++++++++ net/sctp/sm_statefuns.c | 8 +++++--- security/security.c | 7 +++++++ 6 files changed, 37 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/Documentation/security/SCTP.rst b/Documentation/security/SCTP.rst index d5fd6ccc3dcb..406cc68b8808 100644 --- a/Documentation/security/SCTP.rst +++ b/Documentation/security/SCTP.rst @@ -15,10 +15,7 @@ For security module support, three SCTP specific hooks have been implemented:: security_sctp_assoc_request() security_sctp_bind_connect() security_sctp_sk_clone() - -Also the following security hook has been utilised:: - - security_inet_conn_established() + security_sctp_assoc_established() The usage of these hooks are described below with the SELinux implementation described in the `SCTP SELinux Support`_ chapter. @@ -122,11 +119,12 @@ calls **sctp_peeloff**\(3). @newsk - pointer to new sock structure. -security_inet_conn_established() +security_sctp_assoc_established() ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Called when a COOKIE ACK is received:: +Called when a COOKIE ACK is received, and the peer secid will be +saved into ``@asoc->peer_secid`` for client:: - @sk - pointer to sock structure. + @asoc - pointer to sctp association structure. @skb - pointer to skbuff of the COOKIE ACK packet. @@ -134,7 +132,7 @@ Security Hooks used for Association Establishment ------------------------------------------------- The following diagram shows the use of ``security_sctp_bind_connect()``, -``security_sctp_assoc_request()``, ``security_inet_conn_established()`` when +``security_sctp_assoc_request()``, ``security_sctp_assoc_established()`` when establishing an association. :: @@ -172,7 +170,7 @@ establishing an association. <------------------------------------------- COOKIE ACK | | sctp_sf_do_5_1E_ca | - Call security_inet_conn_established() | + Call security_sctp_assoc_established() | to set the peer label. | | | | If SCTP_SOCKET_TCP or peeled off @@ -198,7 +196,7 @@ hooks with the SELinux specifics expanded below:: security_sctp_assoc_request() security_sctp_bind_connect() security_sctp_sk_clone() - security_inet_conn_established() + security_sctp_assoc_established() security_sctp_assoc_request() @@ -271,12 +269,12 @@ sockets sid and peer sid to that contained in the ``@asoc sid`` and @newsk - pointer to new sock structure. -security_inet_conn_established() +security_sctp_assoc_established() ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Called when a COOKIE ACK is received where it sets the connection's peer sid to that in ``@skb``:: - @sk - pointer to sock structure. + @asoc - pointer to sctp association structure. @skb - pointer to skbuff of the COOKIE ACK packet. 
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index a5a724c308d8..45931d81ccc3 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -332,6 +332,8 @@ LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname, struct sockaddr *address, int addrlen) LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc, struct sock *sk, struct sock *newsk) +LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc, + struct sk_buff *skb) #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 3bf5c658bc44..419b5febc3ca 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1046,6 +1046,11 @@ * @asoc pointer to current sctp association structure. * @sk pointer to current sock structure. * @newsk pointer to new sock structure. + * @sctp_assoc_established: + * Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet + * to the security module. + * @asoc pointer to sctp association structure. + * @skb pointer to skbuff of association packet. * * Security hooks for Infiniband * diff --git a/include/linux/security.h b/include/linux/security.h index 6d72772182c8..25b3ef71f495 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1422,6 +1422,8 @@ int security_sctp_bind_connect(struct sock *sk, int optname, struct sockaddr *address, int addrlen); void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk, struct sock *newsk); +int security_sctp_assoc_established(struct sctp_association *asoc, + struct sk_buff *skb); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, @@ -1641,6 +1643,12 @@ static inline void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *newsk) { } + +static inline int security_sctp_assoc_established(struct sctp_association *asoc, + struct sk_buff *skb) +{ + return 0; +} #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index cc544a97c4af..7f342bc12735 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -930,6 +930,11 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net, if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + /* Set peer label for connection. */ + if (security_sctp_assoc_established((struct sctp_association *)asoc, + chunk->skb)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + /* Verify that the chunk length for the COOKIE-ACK is OK. * If we don't do this, any bundled chunks may be junked. */ @@ -945,9 +950,6 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net, */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL()); - /* Set peer label for connection. 
*/ - security_inet_conn_established(ep->base.sk, chunk->skb); - /* RFC 2960 5.1 Normal Establishment of an Association * * E) Upon reception of the COOKIE ACK, endpoint "A" will move diff --git a/security/security.c b/security/security.c index e649c8691be2..9663ffcca4b0 100644 --- a/security/security.c +++ b/security/security.c @@ -2393,6 +2393,13 @@ void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk, } EXPORT_SYMBOL(security_sctp_sk_clone); +int security_sctp_assoc_established(struct sctp_association *asoc, + struct sk_buff *skb) +{ + return call_int_hook(sctp_assoc_established, 0, asoc, skb); +} +EXPORT_SYMBOL(security_sctp_assoc_established); + #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND -- cgit v1.2.3-71-gd317 From b62a8486de3ab1d7c2353ec422b9cca3abfcfbcd Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 31 Jan 2022 16:54:52 +0000 Subject: elfcore: Replace CONFIG_{IA64, UML} checks with a new option As arm64 is about to introduce MTE-specific phdrs in the core dump, add a common CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS option currently selectable by UML_X86 and IA64. Signed-off-by: Catalin Marinas Cc: Eric Biederman Link: https://lore.kernel.org/r/20220131165456.2160675-2-catalin.marinas@arm.com Signed-off-by: Will Deacon --- arch/ia64/Kconfig | 1 + arch/x86/um/Kconfig | 1 + fs/Kconfig.binfmt | 3 +++ include/linux/elfcore.h | 4 ++-- 4 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index a7e01573abd8..e003b2473c64 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -8,6 +8,7 @@ menu "Processor type and features" config IA64 bool + select ARCH_BINFMT_ELF_EXTRA_PHDRS select ARCH_HAS_DMA_MARK_CLEAN select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index 40d6a06e41c8..ead7e5b3a975 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -8,6 +8,7 @@ endmenu config UML_X86 def_bool y + select ARCH_BINFMT_ELF_EXTRA_PHDRS if X86_32 config 64BIT bool "64-bit kernel" if "$(SUBARCH)" = "x86" diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index 4d5ae61580aa..68e586283764 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt @@ -36,6 +36,9 @@ config COMPAT_BINFMT_ELF config ARCH_BINFMT_ELF_STATE bool +config ARCH_BINFMT_ELF_EXTRA_PHDRS + bool + config ARCH_HAVE_ELF_PROT bool diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 746e081879a5..f8e206e82476 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -114,7 +114,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg #endif } -#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64) +#ifdef CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. Dumping its @@ -149,6 +149,6 @@ static inline size_t elf_core_extra_data_size(void) { return 0; } -#endif +#endif /* CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS */ #endif /* _LINUX_ELFCORE_H */ -- cgit v1.2.3-71-gd317 From f41b6be1ebdae452819551ed35a46e6fd32bf467 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:51 +0100 Subject: tee: remove unused tee_shm_pool_alloc_res_mem() None of the drivers in the TEE subsystem uses tee_shm_pool_alloc_res_mem() so remove the function. 
Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/tee_shm_pool.c | 56 ---------------------------------------------- include/linux/tee_drv.h | 30 ------------------------- 2 files changed, 86 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index fcbb461fc59c..a9f9d50fd181 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -47,62 +47,6 @@ static const struct tee_shm_pool_mgr_ops pool_ops_generic = { .destroy_poolmgr = pool_op_gen_destroy_poolmgr, }; -/** - * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved - * memory range - * @priv_info: Information for driver private shared memory pool - * @dmabuf_info: Information for dma-buf shared memory pool - * - * Start and end of pools will must be page aligned. - * - * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied - * in @dmabuf, others will use the range provided by @priv. - * - * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. - */ -struct tee_shm_pool * -tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, - struct tee_shm_pool_mem_info *dmabuf_info) -{ - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; - void *rc; - - /* - * Create the pool for driver private shared memory - */ - rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr, - priv_info->size, - 3 /* 8 byte aligned */); - if (IS_ERR(rc)) - return rc; - priv_mgr = rc; - - /* - * Create the pool for dma_buf shared memory - */ - rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr, - dmabuf_info->paddr, - dmabuf_info->size, PAGE_SHIFT); - if (IS_ERR(rc)) - goto err_free_priv_mgr; - dmabuf_mgr = rc; - - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); - if (IS_ERR(rc)) - goto err_free_dmabuf_mgr; - - return rc; - -err_free_dmabuf_mgr: - tee_shm_pool_mgr_destroy(dmabuf_mgr); -err_free_priv_mgr: - tee_shm_pool_mgr_destroy(priv_mgr); - - return rc; -} -EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); - struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, phys_addr_t paddr, size_t size, diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 5e1533ee3785..6b0f0d01ebdf 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -278,36 +278,6 @@ static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) poolm->ops->destroy_poolmgr(poolm); } -/** - * struct tee_shm_pool_mem_info - holds information needed to create a shared - * memory pool - * @vaddr: Virtual address of start of pool - * @paddr: Physical address of start of pool - * @size: Size in bytes of the pool - */ -struct tee_shm_pool_mem_info { - unsigned long vaddr; - phys_addr_t paddr; - size_t size; -}; - -/** - * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved - * memory range - * @priv_info: Information for driver private shared memory pool - * @dmabuf_info: Information for dma-buf shared memory pool - * - * Start and end of pools will must be page aligned. - * - * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied - * in @dmabuf, others will use the range provided by @priv. - * - * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. 
- */ -struct tee_shm_pool * -tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, - struct tee_shm_pool_mem_info *dmabuf_info); - /** * tee_shm_pool_free() - Free a shared memory pool * @pool: The shared memory pool to free -- cgit v1.2.3-71-gd317 From 71cc47d4cc1f7a333584e0f2f7c863c71a6d3ced Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:52 +0100 Subject: tee: add tee_shm_alloc_user_buf() Adds a new function tee_shm_alloc_user_buf() for user mode allocations, replacing passing the flags TEE_SHM_MAPPED | TEE_SHM_DMA_BUF to tee_shm_alloc(). Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/tee_core.c | 2 +- drivers/tee/tee_private.h | 2 ++ drivers/tee/tee_shm.c | 17 +++++++++++++++++ drivers/tee/tee_shm_pool.c | 2 +- include/linux/tee_drv.h | 2 +- 5 files changed, 22 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 3fc426dad2df..a15812baaeb1 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -297,7 +297,7 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx, if (data.flags) return -EINVAL; - shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + shm = tee_shm_alloc_user_buf(ctx, data.size); if (IS_ERR(shm)) return PTR_ERR(shm); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index e55204df31ce..e09c8aa5d967 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -68,4 +68,6 @@ void tee_device_put(struct tee_device *teedev); void teedev_ctx_get(struct tee_context *ctx); void teedev_ctx_put(struct tee_context *ctx); +struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size); + #endif /*TEE_PRIVATE_H*/ diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 499fccba3d74..7e7e762fc1de 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -127,6 +127,23 @@ err_dev_put: } EXPORT_SYMBOL_GPL(tee_shm_alloc); +/** + * tee_shm_alloc_user_buf() - Allocate shared memory for user space + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * + * Memory allocated as user space shared memory is automatically freed when + * the TEE file pointer is closed. The primary usage of this function is + * when the TEE driver doesn't support registering ordinary user space + * memory. 
+ * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) +{ + return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); +} + /** * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer * @ctx: Context that allocates the shared memory diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index a9f9d50fd181..54c11aa374a8 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2015 Linaro Limited */ #include #include diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 6b0f0d01ebdf..a4393c8c38f3 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2016, Linaro Limited + * Copyright (c) 2015-2016 Linaro Limited */ #ifndef __TEE_DRV_H -- cgit v1.2.3-71-gd317 From d88e0493a054c9fe72ade41a42d42e958ee6503d Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:53 +0100 Subject: tee: simplify shm pool handling Replaces the shared memory pool based on two pools with a single pool. The alloc() function pointer in struct tee_shm_pool_ops gets another parameter, align. This makes it possible to make less than page aligned allocations from the optional reserved shared memory pool while still making user space allocations page aligned. In practice the behaviour is unchanged, while only a single pool is used for bookkeeping. The allocation algorithm in the static OP-TEE shared memory pool is changed from best-fit to first-fit since only the latter supports an alignment parameter. The best-fit algorithm was previously the default choice and not a conscious one. The optee and amdtee drivers are updated as needed to work with this changed pool handling. This also removes OPTEE_SHM_NUM_PRIV_PAGES which becomes obsolete with this change as the private pages can be mixed with the payload pages. The OP-TEE driver changes the minimum alignment for the argument struct from 8 bytes to 512 bytes. A typical OP-TEE private shm allocation is 224 bytes (argument struct with 6 parameters, needed for open session). So with an alignment of 512 we'll waste a bit more than 50%. Before this we had a single page reserved for this so worst case usage compared to that would be 3 pages instead of 1 page. However, this worst case only occurs if there is a high pressure from multiple threads on secure world. All in all this should scale up and down better than fixed boundaries.
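To make the waste figure concrete: a 224-byte argument struct rounded up to the next 512-byte slot leaves 288 bytes unused, i.e. roughly 56%. As for the reworked ops contract itself, a hedged sketch of a trivial page-backed pool (illustrative "toy" names only; the real amdtee and optee conversions follow in the diff) might look like:

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>

static int toy_pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			     size_t size, size_t align)
{
	unsigned int order = get_order(size);
	unsigned long va;

	/*
	 * Page allocations are PAGE_SIZE aligned already, which satisfies
	 * any align value the core currently passes (at most PAGE_SIZE).
	 */
	va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!va)
		return -ENOMEM;

	shm->kaddr = (void *)va;
	shm->paddr = virt_to_phys((void *)va);
	shm->size = PAGE_SIZE << order;
	return 0;
}

static void toy_pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}

static void toy_pool_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops toy_pool_ops = {
	.alloc		= toy_pool_op_alloc,
	.free		= toy_pool_op_free,
	.destroy_pool	= toy_pool_op_destroy_pool,
};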
Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/amdtee/shm_pool.c | 55 ++++++------------- drivers/tee/optee/Kconfig | 8 --- drivers/tee/optee/core.c | 11 ++-- drivers/tee/optee/ffa_abi.c | 55 +++++-------------- drivers/tee/optee/optee_private.h | 4 +- drivers/tee/optee/smc_abi.c | 108 +++++++++++--------------------------- drivers/tee/tee_private.h | 11 ---- drivers/tee/tee_shm.c | 29 +++++----- drivers/tee/tee_shm_pool.c | 106 ++++++++++++------------------------- include/linux/tee_drv.h | 60 +++++++-------------- 10 files changed, 137 insertions(+), 310 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c index 065854e2db18..f87f96a291c9 100644 --- a/drivers/tee/amdtee/shm_pool.c +++ b/drivers/tee/amdtee/shm_pool.c @@ -8,13 +8,17 @@ #include #include "amdtee_private.h" -static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm, - size_t size) +static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align) { unsigned int order = get_order(size); unsigned long va; int rc; + /* + * Ignore alignment since this is already going to be page aligned + * and there's no need for any larger alignment. + */ va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!va) return -ENOMEM; @@ -34,7 +38,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm, return 0; } -static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm) +static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { /* Unmap the shared memory from TEE */ amdtee_unmap_shmem(shm); @@ -42,52 +46,25 @@ static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm) shm->kaddr = NULL; } -static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +static void pool_op_destroy_pool(struct tee_shm_pool *pool) { - kfree(poolm); + kfree(pool); } -static const struct tee_shm_pool_mgr_ops pool_ops = { +static const struct tee_shm_pool_ops pool_ops = { .alloc = pool_op_alloc, .free = pool_op_free, - .destroy_poolmgr = pool_op_destroy_poolmgr, + .destroy_pool = pool_op_destroy_pool, }; -static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void) -{ - struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); - - if (!mgr) - return ERR_PTR(-ENOMEM); - - mgr->ops = &pool_ops; - - return mgr; -} - struct tee_shm_pool *amdtee_config_shm(void) { - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; - void *rc; + struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL); - rc = pool_mem_mgr_alloc(); - if (IS_ERR(rc)) - return rc; - priv_mgr = rc; - - rc = pool_mem_mgr_alloc(); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - return rc; - } - dmabuf_mgr = rc; + if (!pool) + return ERR_PTR(-ENOMEM); - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - tee_shm_pool_mgr_destroy(dmabuf_mgr); - } + pool->ops = &pool_ops; - return rc; + return pool; } diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index 3ca71e3812ed..f121c224e682 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -7,11 +7,3 @@ config OPTEE help This implements the OP-TEE Trusted Execution Environment (TEE) driver. - -config OPTEE_SHM_NUM_PRIV_PAGES - int "Private Shared Memory Pages" - default 1 - depends on OPTEE - help - This sets the number of private shared memory pages to be - used by OP-TEE TEE driver. 
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 17a6f51d3089..f4bccb5f0e93 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -18,8 +18,8 @@ #include #include "optee_private.h" -int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size, +int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align, int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, struct page **pages, @@ -30,6 +30,10 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, struct page *page; int rc = 0; + /* + * Ignore alignment since this is already going to be page aligned + * and there's no need for any larger alignment. + */ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); if (!page) return -ENOMEM; @@ -51,7 +55,6 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, for (i = 0; i < nr_pages; i++) pages[i] = page + i; - shm->flags |= TEE_SHM_REGISTER; rc = shm_register(shm->ctx, shm, pages, nr_pages, (unsigned long)shm->kaddr); kfree(pages); @@ -62,7 +65,7 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, return 0; err: - __free_pages(page, order); + free_pages((unsigned long)shm->kaddr, order); return rc; } diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 545f61af1248..91dd80945bf1 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -369,14 +369,14 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx, * The main function is optee_ffa_shm_pool_alloc_pages(). */ -static int pool_ffa_op_alloc(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size) +static int pool_ffa_op_alloc(struct tee_shm_pool *pool, + struct tee_shm *shm, size_t size, size_t align) { - return optee_pool_op_alloc_helper(poolm, shm, size, + return optee_pool_op_alloc_helper(pool, shm, size, align, optee_ffa_shm_register); } -static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm, +static void pool_ffa_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { optee_ffa_shm_unregister(shm->ctx, shm); @@ -384,15 +384,15 @@ static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm, shm->kaddr = NULL; } -static void pool_ffa_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool) { - kfree(poolm); + kfree(pool); } -static const struct tee_shm_pool_mgr_ops pool_ffa_ops = { +static const struct tee_shm_pool_ops pool_ffa_ops = { .alloc = pool_ffa_op_alloc, .free = pool_ffa_op_free, - .destroy_poolmgr = pool_ffa_op_destroy_poolmgr, + .destroy_pool = pool_ffa_op_destroy_pool, }; /** @@ -401,16 +401,16 @@ static const struct tee_shm_pool_mgr_ops pool_ffa_ops = { * This pool is used with OP-TEE over FF-A. In this case command buffers * and such are allocated from kernel's own memory. 
*/ -static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void) +static struct tee_shm_pool *optee_ffa_shm_pool_alloc_pages(void) { - struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL); - if (!mgr) + if (!pool) return ERR_PTR(-ENOMEM); - mgr->ops = &pool_ffa_ops; + pool->ops = &pool_ffa_ops; - return mgr; + return pool; } /* @@ -691,33 +691,6 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev, return true; } -static struct tee_shm_pool *optee_ffa_config_dyn_shm(void) -{ - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; - void *rc; - - rc = optee_ffa_shm_pool_alloc_pages(); - if (IS_ERR(rc)) - return rc; - priv_mgr = rc; - - rc = optee_ffa_shm_pool_alloc_pages(); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - return rc; - } - dmabuf_mgr = rc; - - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - tee_shm_pool_mgr_destroy(dmabuf_mgr); - } - - return rc; -} - static void optee_ffa_get_version(struct tee_device *teedev, struct tee_ioctl_version_data *vers) { @@ -815,7 +788,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) if (!optee) return -ENOMEM; - pool = optee_ffa_config_dyn_shm(); + pool = optee_ffa_shm_pool_alloc_pages(); if (IS_ERR(pool)) { rc = PTR_ERR(pool); goto err_free_optee; diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 92bc47bef95f..df3a483bbf46 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -229,8 +229,8 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); int optee_enumerate_devices(u32 func); void optee_unregister_devices(void); -int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size, +int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align, int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, struct page **pages, diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index bacd1a1d79ee..3dc8cbea1a24 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -42,7 +42,15 @@ * 6. Driver initialization. */ -#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES +/* + * A typical OP-TEE private shm allocation is 224 bytes (argument struct + * with 6 parameters, needed for open session). So with an alignment of 512 + * we'll waste a bit more than 50%. However, it's only expected that we'll + * have a handful of these structs allocated at a time. Most memory will + * be allocated aligned to the page size, So all in all this should scale + * up and down quite well. + */ +#define OPTEE_MIN_STATIC_POOL_ALIGN 9 /* 512 bytes aligned */ /* * 1. Convert between struct tee_param and struct optee_msg_param @@ -532,20 +540,21 @@ static int optee_shm_unregister_supp(struct tee_context *ctx, * The main function is optee_shm_pool_alloc_pages(). */ -static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size) +static int pool_op_alloc(struct tee_shm_pool *pool, + struct tee_shm *shm, size_t size, size_t align) { /* * Shared memory private to the OP-TEE driver doesn't need * to be registered with OP-TEE. 
*/ if (shm->flags & TEE_SHM_PRIV) - return optee_pool_op_alloc_helper(poolm, shm, size, NULL); + return optee_pool_op_alloc_helper(pool, shm, size, align, NULL); - return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register); + return optee_pool_op_alloc_helper(pool, shm, size, align, + optee_shm_register); } -static void pool_op_free(struct tee_shm_pool_mgr *poolm, +static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { if (!(shm->flags & TEE_SHM_PRIV)) @@ -555,15 +564,15 @@ static void pool_op_free(struct tee_shm_pool_mgr *poolm, shm->kaddr = NULL; } -static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +static void pool_op_destroy_pool(struct tee_shm_pool *pool) { - kfree(poolm); + kfree(pool); } -static const struct tee_shm_pool_mgr_ops pool_ops = { +static const struct tee_shm_pool_ops pool_ops = { .alloc = pool_op_alloc, .free = pool_op_free, - .destroy_poolmgr = pool_op_destroy_poolmgr, + .destroy_pool = pool_op_destroy_pool, }; /** @@ -572,16 +581,16 @@ static const struct tee_shm_pool_mgr_ops pool_ops = { * This pool is used when OP-TEE supports dymanic SHM. In this case * command buffers and such are allocated from kernel's own memory. */ -static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void) +static struct tee_shm_pool *optee_shm_pool_alloc_pages(void) { - struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL); - if (!mgr) + if (!pool) return ERR_PTR(-ENOMEM); - mgr->ops = &pool_ops; + pool->ops = &pool_ops; - return mgr; + return pool; } /* @@ -1153,33 +1162,6 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, return true; } -static struct tee_shm_pool *optee_config_dyn_shm(void) -{ - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; - void *rc; - - rc = optee_shm_pool_alloc_pages(); - if (IS_ERR(rc)) - return rc; - priv_mgr = rc; - - rc = optee_shm_pool_alloc_pages(); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - return rc; - } - dmabuf_mgr = rc; - - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); - if (IS_ERR(rc)) { - tee_shm_pool_mgr_destroy(priv_mgr); - tee_shm_pool_mgr_destroy(dmabuf_mgr); - } - - return rc; -} - static struct tee_shm_pool * optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) { @@ -1193,10 +1175,7 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) phys_addr_t begin; phys_addr_t end; void *va; - struct tee_shm_pool_mgr *priv_mgr; - struct tee_shm_pool_mgr *dmabuf_mgr; void *rc; - const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); if (res.result.status != OPTEE_SMC_RETURN_OK) { @@ -1214,11 +1193,6 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) paddr = begin; size = end - begin; - if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) { - pr_err("too small shared memory area\n"); - return ERR_PTR(-EINVAL); - } - va = memremap(paddr, size, MEMREMAP_WB); if (!va) { pr_err("shared memory ioremap failed\n"); @@ -1226,35 +1200,13 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) } vaddr = (unsigned long)va; - rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz, - 3 /* 8 bytes aligned */); - if (IS_ERR(rc)) - goto err_memunmap; - priv_mgr = rc; - - vaddr += sz; - paddr += sz; - size -= sz; - - rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT); - if (IS_ERR(rc)) - goto 
err_free_priv_mgr; - dmabuf_mgr = rc; - - rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); + rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size, + OPTEE_MIN_STATIC_POOL_ALIGN); if (IS_ERR(rc)) - goto err_free_dmabuf_mgr; - - *memremaped_shm = va; - - return rc; + memunmap(va); + else + *memremaped_shm = va; -err_free_dmabuf_mgr: - tee_shm_pool_mgr_destroy(dmabuf_mgr); -err_free_priv_mgr: - tee_shm_pool_mgr_destroy(priv_mgr); -err_memunmap: - memunmap(va); return rc; } @@ -1376,7 +1328,7 @@ static int optee_probe(struct platform_device *pdev) * Try to use dynamic shared memory if possible */ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) - pool = optee_config_dyn_shm(); + pool = optee_shm_pool_alloc_pages(); /* * If dynamic shared memory is not available or failed - try static one diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index e09c8aa5d967..7265f47c6d8e 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -12,17 +12,6 @@ #include #include -/** - * struct tee_shm_pool - shared memory pool - * @private_mgr: pool manager for shared memory only between kernel - * and secure world - * @dma_buf_mgr: pool manager for shared memory exported to user space - */ -struct tee_shm_pool { - struct tee_shm_pool_mgr *private_mgr; - struct tee_shm_pool_mgr *dma_buf_mgr; -}; - #define TEE_DEVICE_FLAG_REGISTERED 0x1 #define TEE_MAX_DEV_NAME_LEN 32 diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 7e7e762fc1de..f0a9cccd2f2c 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -31,14 +31,7 @@ static void release_registered_pages(struct tee_shm *shm) static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) { if (shm->flags & TEE_SHM_POOL) { - struct tee_shm_pool_mgr *poolm; - - if (shm->flags & TEE_SHM_DMA_BUF) - poolm = teedev->pool->dma_buf_mgr; - else - poolm = teedev->pool->private_mgr; - - poolm->ops->free(poolm, shm); + teedev->pool->ops->free(teedev->pool, shm); } else if (shm->flags & TEE_SHM_REGISTER) { int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); @@ -59,8 +52,8 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) { struct tee_device *teedev = ctx->teedev; - struct tee_shm_pool_mgr *poolm = NULL; struct tee_shm *shm; + size_t align; void *ret; int rc; @@ -93,12 +86,18 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) refcount_set(&shm->refcount, 1); shm->flags = flags | TEE_SHM_POOL; shm->ctx = ctx; - if (flags & TEE_SHM_DMA_BUF) - poolm = teedev->pool->dma_buf_mgr; - else - poolm = teedev->pool->private_mgr; + if (flags & TEE_SHM_DMA_BUF) { + align = PAGE_SIZE; + /* + * Request to register the shm in the pool allocator below + * if supported. 
+ */ + shm->flags |= TEE_SHM_REGISTER; + } else { + align = 2 * sizeof(long); + } - rc = poolm->ops->alloc(poolm, shm, size); + rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align); if (rc) { ret = ERR_PTR(rc); goto err_kfree; @@ -118,7 +117,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) return shm; err_pool_free: - poolm->ops->free(poolm, shm); + teedev->pool->ops->free(teedev->pool, shm); err_kfree: kfree(shm); err_dev_put: diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index 54c11aa374a8..71e0f8ae69aa 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015 Linaro Limited + * Copyright (c) 2015, 2017, 2022 Linaro Limited */ #include #include @@ -9,14 +9,16 @@ #include #include "tee_private.h" -static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm, size_t size) +static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align) { unsigned long va; - struct gen_pool *genpool = poolm->private_data; - size_t s = roundup(size, 1 << genpool->min_alloc_order); + struct gen_pool *genpool = pool->private_data; + size_t a = max_t(size_t, align, BIT(genpool->min_alloc_order)); + struct genpool_data_align data = { .align = a }; + size_t s = roundup(size, a); - va = gen_pool_alloc(genpool, s); + va = gen_pool_alloc_algo(genpool, s, gen_pool_first_fit_align, &data); if (!va) return -ENOMEM; @@ -24,107 +26,67 @@ static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm, shm->kaddr = (void *)va; shm->paddr = gen_pool_virt_to_phys(genpool, va); shm->size = s; + /* + * This is from a static shared memory pool so no need to register + * each chunk, and no need to unregister later either. 
+ */ + shm->flags &= ~TEE_SHM_REGISTER; return 0; } -static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm, - struct tee_shm *shm) +static void pool_op_gen_free(struct tee_shm_pool *pool, struct tee_shm *shm) { - gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr, + gen_pool_free(pool->private_data, (unsigned long)shm->kaddr, shm->size); shm->kaddr = NULL; } -static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +static void pool_op_gen_destroy_pool(struct tee_shm_pool *pool) { - gen_pool_destroy(poolm->private_data); - kfree(poolm); + gen_pool_destroy(pool->private_data); + kfree(pool); } -static const struct tee_shm_pool_mgr_ops pool_ops_generic = { +static const struct tee_shm_pool_ops pool_ops_generic = { .alloc = pool_op_gen_alloc, .free = pool_op_gen_free, - .destroy_poolmgr = pool_op_gen_destroy_poolmgr, + .destroy_pool = pool_op_gen_destroy_pool, }; -struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, - phys_addr_t paddr, - size_t size, - int min_alloc_order) +struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, size_t size, + int min_alloc_order) { const size_t page_mask = PAGE_SIZE - 1; - struct tee_shm_pool_mgr *mgr; + struct tee_shm_pool *pool; int rc; /* Start and end must be page aligned */ if (vaddr & page_mask || paddr & page_mask || size & page_mask) return ERR_PTR(-EINVAL); - mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); - if (!mgr) + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) return ERR_PTR(-ENOMEM); - mgr->private_data = gen_pool_create(min_alloc_order, -1); - if (!mgr->private_data) { + pool->private_data = gen_pool_create(min_alloc_order, -1); + if (!pool->private_data) { rc = -ENOMEM; goto err; } - gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL); - rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1); + rc = gen_pool_add_virt(pool->private_data, vaddr, paddr, size, -1); if (rc) { - gen_pool_destroy(mgr->private_data); + gen_pool_destroy(pool->private_data); goto err; } - mgr->ops = &pool_ops_generic; + pool->ops = &pool_ops_generic; - return mgr; + return pool; err: - kfree(mgr); + kfree(pool); return ERR_PTR(rc); } -EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem); - -static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr) -{ - return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free && - mgr->ops->destroy_poolmgr; -} - -struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, - struct tee_shm_pool_mgr *dmabuf_mgr) -{ - struct tee_shm_pool *pool; - - if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr)) - return ERR_PTR(-EINVAL); - - pool = kzalloc(sizeof(*pool), GFP_KERNEL); - if (!pool) - return ERR_PTR(-ENOMEM); - - pool->private_mgr = priv_mgr; - pool->dma_buf_mgr = dmabuf_mgr; - - return pool; -} -EXPORT_SYMBOL_GPL(tee_shm_pool_alloc); - -/** - * tee_shm_pool_free() - Free a shared memory pool - * @pool: The shared memory pool to free - * - * There must be no remaining shared memory allocated from this pool when - * this function is called. 
- */ -void tee_shm_pool_free(struct tee_shm_pool *pool) -{ - if (pool->private_mgr) - tee_shm_pool_mgr_destroy(pool->private_mgr); - if (pool->dma_buf_mgr) - tee_shm_pool_mgr_destroy(pool->dma_buf_mgr); - kfree(pool); -} -EXPORT_SYMBOL_GPL(tee_shm_pool_free); +EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index a4393c8c38f3..ed641dc314bd 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2016 Linaro Limited + * Copyright (c) 2015-2022 Linaro Limited */ #ifndef __TEE_DRV_H @@ -221,62 +221,39 @@ struct tee_shm { }; /** - * struct tee_shm_pool_mgr - shared memory manager + * struct tee_shm_pool - shared memory pool * @ops: operations * @private_data: private data for the shared memory manager */ -struct tee_shm_pool_mgr { - const struct tee_shm_pool_mgr_ops *ops; +struct tee_shm_pool { + const struct tee_shm_pool_ops *ops; void *private_data; }; /** - * struct tee_shm_pool_mgr_ops - shared memory pool manager operations + * struct tee_shm_pool_ops - shared memory pool operations * @alloc: called when allocating shared memory * @free: called when freeing shared memory - * @destroy_poolmgr: called when destroying the pool manager + * @destroy_pool: called when destroying the pool */ -struct tee_shm_pool_mgr_ops { - int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, - size_t size); - void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); - void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr); +struct tee_shm_pool_ops { + int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align); + void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm); + void (*destroy_pool)(struct tee_shm_pool *pool); }; -/** - * tee_shm_pool_alloc() - Create a shared memory pool from shm managers - * @priv_mgr: manager for driver private shared memory allocations - * @dmabuf_mgr: manager for dma-buf shared memory allocations - * - * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied - * in @dmabuf, others will use the range provided by @priv. - * - * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. - */ -struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, - struct tee_shm_pool_mgr *dmabuf_mgr); - /* - * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved - * memory + * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory * @vaddr: Virtual address of start of pool * @paddr: Physical address of start of pool * @size: Size in bytes of the pool * - * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure. - */ -struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, - phys_addr_t paddr, - size_t size, - int min_alloc_order); - -/** - * tee_shm_pool_mgr_destroy() - Free a shared memory manager + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. */ -static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) -{ - poolm->ops->destroy_poolmgr(poolm); -} +struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, size_t size, + int min_alloc_order); /** * tee_shm_pool_free() - Free a shared memory pool @@ -285,7 +262,10 @@ static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) * The must be no remaining shared memory allocated from this pool when * this function is called. 
*/ -void tee_shm_pool_free(struct tee_shm_pool *pool); +static inline void tee_shm_pool_free(struct tee_shm_pool *pool) +{ + pool->ops->destroy_pool(pool); +} /** * tee_get_drvdata() - Return driver_data pointer -- cgit v1.2.3-71-gd317 From 5d41f1b3e3282909b6bbceacb9aebe1d3c849a49 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:54 +0100 Subject: tee: replace tee_shm_alloc() tee_shm_alloc() is replaced by three new functions, tee_shm_alloc_user_buf() - for user mode allocations, replacing passing the flags TEE_SHM_MAPPED | TEE_SHM_DMA_BUF tee_shm_alloc_kernel_buf() - for kernel mode allocations, slightly optimized compared to using the flags TEE_SHM_MAPPED | TEE_SHM_DMA_BUF. tee_shm_alloc_priv_buf() - primarily for TEE driver internal use. This also makes the interface easier to use as we can get rid of the somewhat hard to use flags parameter. The TEE subsystem and the TEE drivers are updated to use the new functions instead. Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/optee/call.c | 2 +- drivers/tee/optee/device.c | 5 +- drivers/tee/optee/ffa_abi.c | 4 +- drivers/tee/optee/smc_abi.c | 6 +-- drivers/tee/tee_shm.c | 108 ++++++++++++++++++++++++++++---------------- include/linux/tee_drv.h | 16 +------ 6 files changed, 76 insertions(+), 65 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index b25cc1fac945..bd49ec934060 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -120,7 +120,7 @@ struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params, if (optee->rpc_arg_count) sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count); - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc_priv_buf(ctx, sz); if (IS_ERR(shm)) return shm; diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c index 128a2d2a50a1..f3947be13e2e 100644 --- a/drivers/tee/optee/device.c +++ b/drivers/tee/optee/device.c @@ -121,10 +121,9 @@ static int __optee_enumerate_devices(u32 func) if (rc < 0 || !shm_size) goto out_sess; - device_shm = tee_shm_alloc(ctx, shm_size, - TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + device_shm = tee_shm_alloc_kernel_buf(ctx, shm_size); if (IS_ERR(device_shm)) { - pr_err("tee_shm_alloc failed\n"); + pr_err("tee_shm_alloc_kernel_buf failed\n"); rc = PTR_ERR(device_shm); goto out_sess; } diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 91dd80945bf1..fb7345941024 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -440,8 +440,8 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b, - TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc_priv_buf(optee->ctx, + arg->params[0].u.value.b); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 3dc8cbea1a24..7580d52b3852 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -661,8 +661,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = optee_rpc_cmd_alloc_suppl(ctx, sz); break; case OPTEE_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(optee->ctx, sz, - TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc_priv_buf(optee->ctx, sz); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; @@ -787,8 +786,7 @@ static void 
optee_handle_rpc(struct tee_context *ctx, switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { case OPTEE_SMC_RPC_FUNC_ALLOC: - shm = tee_shm_alloc(optee->ctx, param->a1, - TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1); if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { reg_pair_from_64(&param->a1, &param->a2, pa); reg_pair_from_64(&param->a4, &param->a5, diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index f0a9cccd2f2c..dd748d572691 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -49,25 +49,14 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) tee_device_put(teedev); } -struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) +static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size, + size_t align, u32 flags, int id) { struct tee_device *teedev = ctx->teedev; struct tee_shm *shm; - size_t align; void *ret; int rc; - if (!(flags & TEE_SHM_MAPPED)) { - dev_err(teedev->dev.parent, - "only mapped allocations supported\n"); - return ERR_PTR(-EINVAL); - } - - if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) { - dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags); - return ERR_PTR(-EINVAL); - } - if (!tee_device_get(teedev)) return ERR_PTR(-EINVAL); @@ -84,18 +73,16 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) } refcount_set(&shm->refcount, 1); - shm->flags = flags | TEE_SHM_POOL; + shm->flags = flags; + shm->id = id; + + /* + * We're assigning this as it is needed if the shm is to be + * registered. If this function returns OK then the caller expected + * to call teedev_ctx_get() or clear shm->ctx in case it's not + * needed any longer.
- */ - shm->flags |= TEE_SHM_REGISTER; - } else { - align = 2 * sizeof(long); - } rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align); if (rc) { @@ -103,28 +90,14 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) goto err_kfree; } - if (flags & TEE_SHM_DMA_BUF) { - mutex_lock(&teedev->mutex); - shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); - mutex_unlock(&teedev->mutex); - if (shm->id < 0) { - ret = ERR_PTR(shm->id); - goto err_pool_free; - } - } - teedev_ctx_get(ctx); - return shm; -err_pool_free: - teedev->pool->ops->free(teedev->pool, shm); err_kfree: kfree(shm); err_dev_put: tee_device_put(teedev); return ret; } -EXPORT_SYMBOL_GPL(tee_shm_alloc); /** * tee_shm_alloc_user_buf() - Allocate shared memory for user space @@ -140,7 +113,36 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc); */ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) { - return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + u32 flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_REGISTER | + TEE_SHM_POOL; + struct tee_device *teedev = ctx->teedev; + struct tee_shm *shm; + void *ret; + int id; + + mutex_lock(&teedev->mutex); + id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (id < 0) + return ERR_PTR(id); + + shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id); + if (IS_ERR(shm)) { + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, id); + mutex_unlock(&teedev->mutex); + return shm; + } + + mutex_lock(&teedev->mutex); + ret = idr_replace(&teedev->idr, shm, id); + mutex_unlock(&teedev->mutex); + if (IS_ERR(ret)) { + tee_shm_free(shm); + return ret; + } + + return shm; } /** @@ -157,10 +159,36 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) */ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size) { - return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED); + u32 flags = TEE_SHM_MAPPED | TEE_SHM_REGISTER | TEE_SHM_POOL; + + return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1); } EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf); +/** + * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared + * kernel buffer + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * + * This function returns similar shared memory as + * tee_shm_alloc_kernel_buf(), but with the difference that the memory + * might not be registered in secure world in case the driver supports + * passing memory not registered in advance. + * + * This function should normally only be used internally in the TEE + * drivers. 
+ * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size) +{ + u32 flags = TEE_SHM_MAPPED | TEE_SHM_PRIV | TEE_SHM_POOL; + + return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1); +} +EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf); + struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, size_t length, u32 flags) { diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index ed641dc314bd..7f038f8787c7 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -273,21 +273,7 @@ static inline void tee_shm_pool_free(struct tee_shm_pool *pool) */ void *tee_get_drvdata(struct tee_device *teedev); -/** - * tee_shm_alloc() - Allocate shared memory - * @ctx: Context that allocates the shared memory - * @size: Requested size of shared memory - * @flags: Flags setting properties for the requested shared memory. - * - * Memory allocated as global shared memory is automatically freed when the - * TEE file pointer is closed. The @flags field uses the bits defined by - * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If - * TEE_SHM_DMA_BUF global shared memory will be allocated and associated - * with a dma-buf handle, else driver private memory. - * - * @returns a pointer to 'struct tee_shm' - */ -struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); +struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); /** -- cgit v1.2.3-71-gd317 From 056d3fed3d1ff3f5d699be337f048f9eed2befaf Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:56 +0100 Subject: tee: add tee_shm_register_{user,kernel}_buf() Adds the two new functions tee_shm_register_user_buf() and tee_shm_register_kernel_buf() which should be used instead of the old tee_shm_register(). This avoids having the caller supplying the flags parameter which exposes a bit more than desired of the internals of the TEE subsystem. 
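A hedged usage sketch for the kernel-side helper (hypothetical client code, not from this series; assumes a context obtained elsewhere, e.g. via tee_client_open_context()):

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>

static int foo_share_with_tee(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *buf;
	int rc = 0;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* no flags argument: the helper picks the right ones internally */
	shm = tee_shm_register_kernel_buf(ctx, buf, PAGE_SIZE);
	if (IS_ERR(shm)) {
		rc = PTR_ERR(shm);
		goto out_free;
	}

	/* ... reference shm from a struct tee_param and invoke the TEE ... */

	tee_shm_free(shm);
out_free:
	kfree(buf);
	return rc;
}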
Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/tee_core.c | 3 +-- drivers/tee/tee_private.h | 2 ++ drivers/tee/tee_shm.c | 33 +++++++++++++++++++++++++++++++++ include/linux/tee_drv.h | 2 ++ 4 files changed, 38 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index a15812baaeb1..8aa1a4836b92 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -334,8 +334,7 @@ tee_ioctl_shm_register(struct tee_context *ctx, if (data.flags) return -EINVAL; - shm = tee_shm_register(ctx, data.addr, data.length, - TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED); + shm = tee_shm_register_user_buf(ctx, data.addr, data.length); if (IS_ERR(shm)) return PTR_ERR(shm); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index 7265f47c6d8e..409cadcc1cff 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -58,5 +58,7 @@ void teedev_ctx_get(struct tee_context *ctx); void teedev_ctx_put(struct tee_context *ctx); struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size); +struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, + unsigned long addr, size_t length); #endif /*TEE_PRIVATE_H*/ diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index dd748d572691..359bab36e163 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -299,6 +299,39 @@ err: } EXPORT_SYMBOL_GPL(tee_shm_register); +/** + * tee_shm_register_user_buf() - Register a userspace shared memory buffer + * @ctx: Context that registers the shared memory + * @addr: The userspace address of the shared buffer + * @length: Length of the shared buffer + * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, + unsigned long addr, size_t length) +{ + return tee_shm_register(ctx, addr, length, + TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED); +} + +/** + * tee_shm_register_kernel_buf() - Register kernel memory to be shared with + * secure world + * @ctx: Context that registers the shared memory + * @addr: The buffer + * @length: Length of the buffer + * + * @returns a pointer to 'struct tee_shm' + */ + +struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, + void *addr, size_t length) +{ + return tee_shm_register(ctx, (unsigned long)addr, length, + TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED); +} +EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf); + static int tee_shm_fop_release(struct inode *inode, struct file *filp) { tee_shm_put(filp->private_data); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 7f038f8787c7..c9d2cc32a5ed 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -287,6 +287,8 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); */ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, size_t length, u32 flags); +struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, + void *addr, size_t length); /** * tee_shm_is_registered() - Check if shared memory object in registered in TEE -- cgit v1.2.3-71-gd317 From 53e16519c2eccdb2e1b123405466a29aaea1132e Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:58 +0100 Subject: tee: replace tee_shm_register() tee_shm_register() is replaced by the previously introduced functions tee_shm_register_user_buf() and tee_shm_register_kernel_buf(). Since there are no external callers left, we can remove tee_shm_register() and refactor the remains. 
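The refactored tee_shm_register_user_buf() below keeps the two-step IDR publication pattern already used on the allocation path: the id is reserved with a NULL pointer first, and only pointed at the real object with idr_replace() once setup has fully succeeded, so a concurrent lookup can never see a half-initialized entry. A minimal stand-alone sketch of that pattern (all names here are invented for illustration):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static int example_publish(struct idr *idr, struct mutex *lock, void *obj)
{
	void *old;
	int id;

	/* Reserve an id, keeping the slot NULL so lookups find nothing */
	mutex_lock(lock);
	id = idr_alloc(idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (id < 0)
		return id;

	/* ... sleeping, possibly failing initialization of obj ... */

	/* Publish the fully initialized object under the reserved id */
	mutex_lock(lock);
	old = idr_replace(idr, obj, id);
	mutex_unlock(lock);
	if (IS_ERR(old)) {
		mutex_lock(lock);
		idr_remove(idr, id);
		mutex_unlock(lock);
		return PTR_ERR(old);
	}

	return id;
}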
Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/tee_shm.c | 156 ++++++++++++++++++++++++++++-------------------- include/linux/tee_drv.h | 11 ---- 2 files changed, 90 insertions(+), 77 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 359bab36e163..9db571253802 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -12,17 +12,43 @@ #include #include "tee_private.h" +static void shm_put_kernel_pages(struct page **pages, size_t page_count) +{ + size_t n; + + for (n = 0; n < page_count; n++) + put_page(pages[n]); +} + +static int shm_get_kernel_pages(unsigned long start, size_t page_count, + struct page **pages) +{ + struct kvec *kiov; + size_t n; + int rc; + + kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL); + if (!kiov) + return -ENOMEM; + + for (n = 0; n < page_count; n++) { + kiov[n].iov_base = (void *)(start + n * PAGE_SIZE); + kiov[n].iov_len = PAGE_SIZE; + } + + rc = get_kernel_pages(kiov, page_count, 0, pages); + kfree(kiov); + + return rc; +} + static void release_registered_pages(struct tee_shm *shm) { if (shm->pages) { - if (shm->flags & TEE_SHM_USER_MAPPED) { + if (shm->flags & TEE_SHM_USER_MAPPED) unpin_user_pages(shm->pages, shm->num_pages); - } else { - size_t n; - - for (n = 0; n < shm->num_pages; n++) - put_page(shm->pages[n]); - } + else + shm_put_kernel_pages(shm->pages, shm->num_pages); kfree(shm->pages); } @@ -189,28 +215,24 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size) } EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf); -struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, - size_t length, u32 flags) +static struct tee_shm * +register_shm_helper(struct tee_context *ctx, unsigned long addr, + size_t length, u32 flags, int id) { struct tee_device *teedev = ctx->teedev; - const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED; - const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED; struct tee_shm *shm; + unsigned long start; + size_t num_pages; void *ret; int rc; - int num_pages; - unsigned long start; - - if (flags != req_user_flags && flags != req_kernel_flags) - return ERR_PTR(-ENOTSUPP); if (!tee_device_get(teedev)) return ERR_PTR(-EINVAL); if (!teedev->desc->ops->shm_register || !teedev->desc->ops->shm_unregister) { - tee_device_put(teedev); - return ERR_PTR(-ENOTSUPP); + ret = ERR_PTR(-ENOTSUPP); + goto err_dev_put; } teedev_ctx_get(ctx); @@ -218,13 +240,13 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, shm = kzalloc(sizeof(*shm), GFP_KERNEL); if (!shm) { ret = ERR_PTR(-ENOMEM); - goto err; + goto err_ctx_put; } refcount_set(&shm->refcount, 1); - shm->flags = flags | TEE_SHM_REGISTER; + shm->flags = flags; shm->ctx = ctx; - shm->id = -1; + shm->id = id; addr = untagged_addr(addr); start = rounddown(addr, PAGE_SIZE); shm->offset = addr - start; @@ -233,71 +255,45 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); if (!shm->pages) { ret = ERR_PTR(-ENOMEM); - goto err; + goto err_free_shm; } - if (flags & TEE_SHM_USER_MAPPED) { + if (flags & TEE_SHM_USER_MAPPED) rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages); - } else { - struct kvec *kiov; - int i; - - kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL); - if (!kiov) { - ret = ERR_PTR(-ENOMEM); - goto err; - } - - for (i = 0; i < num_pages; i++) { - kiov[i].iov_base = (void *)(start + i * PAGE_SIZE); 
- kiov[i].iov_len = PAGE_SIZE; - } - - rc = get_kernel_pages(kiov, num_pages, 0, shm->pages); - kfree(kiov); - } + else + rc = shm_get_kernel_pages(start, num_pages, shm->pages); if (rc > 0) shm->num_pages = rc; if (rc != num_pages) { if (rc >= 0) rc = -ENOMEM; ret = ERR_PTR(rc); - goto err; - } - - mutex_lock(&teedev->mutex); - shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); - mutex_unlock(&teedev->mutex); - - if (shm->id < 0) { - ret = ERR_PTR(shm->id); - goto err; + goto err_put_shm_pages; } rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages, shm->num_pages, start); if (rc) { ret = ERR_PTR(rc); - goto err; + goto err_put_shm_pages; } return shm; -err: - if (shm) { - if (shm->id >= 0) { - mutex_lock(&teedev->mutex); - idr_remove(&teedev->idr, shm->id); - mutex_unlock(&teedev->mutex); - } - release_registered_pages(shm); - } +err_put_shm_pages: + if (flags & TEE_SHM_USER_MAPPED) + unpin_user_pages(shm->pages, shm->num_pages); + else + shm_put_kernel_pages(shm->pages, shm->num_pages); + kfree(shm->pages); +err_free_shm: kfree(shm); +err_ctx_put: teedev_ctx_put(ctx); +err_dev_put: tee_device_put(teedev); return ret; } -EXPORT_SYMBOL_GPL(tee_shm_register); /** * tee_shm_register_user_buf() - Register a userspace shared memory buffer @@ -310,8 +306,35 @@ EXPORT_SYMBOL_GPL(tee_shm_register); struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, unsigned long addr, size_t length) { - return tee_shm_register(ctx, addr, length, - TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED); + u32 flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED | TEE_SHM_REGISTER; + struct tee_device *teedev = ctx->teedev; + struct tee_shm *shm; + void *ret; + int id; + + mutex_lock(&teedev->mutex); + id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (id < 0) + return ERR_PTR(id); + + shm = register_shm_helper(ctx, addr, length, flags, id); + if (IS_ERR(shm)) { + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, id); + mutex_unlock(&teedev->mutex); + return shm; + } + + mutex_lock(&teedev->mutex); + ret = idr_replace(&teedev->idr, shm, id); + mutex_unlock(&teedev->mutex); + if (IS_ERR(ret)) { + tee_shm_free(shm); + return ret; + } + + return shm; } /** @@ -327,8 +350,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length) { - return tee_shm_register(ctx, (unsigned long)addr, length, - TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED); + u32 flags = TEE_SHM_REGISTER | TEE_SHM_KERNEL_MAPPED; + + return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1); } EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index c9d2cc32a5ed..a3b663ef0694 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -276,17 +276,6 @@ void *tee_get_drvdata(struct tee_device *teedev); struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); -/** - * tee_shm_register() - Register shared memory buffer - * @ctx: Context that registers the shared memory - * @addr: Address is userspace of the shared buffer - * @length: Length of the shared buffer - * @flags: Flags setting properties for the requested shared memory. 
- * - * @returns a pointer to 'struct tee_shm' - */ -struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, - size_t length, u32 flags); struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length); -- cgit v1.2.3-71-gd317 From a45ea4efa358577c623d7353a6ba9af3c17f6ca0 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 4 Feb 2022 10:33:59 +0100 Subject: tee: refactor TEE_SHM_* flags Removes the redundant TEE_SHM_DMA_BUF, TEE_SHM_EXT_DMA_BUF, TEE_SHM_MAPPED and TEE_SHM_KERNEL_MAPPED flags. TEE_SHM_REGISTER is renamed to TEE_SHM_DYNAMIC in order to better match its usage. Assigns new values to the remaining flags to avoid gaps. Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/optee/smc_abi.c | 4 ++-- drivers/tee/tee_shm.c | 23 +++++++++++------------ drivers/tee/tee_shm_pool.c | 2 +- include/linux/tee_drv.h | 21 +++++++++------------ 4 files changed, 23 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index ef3e27b602e6..e924928c2673 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -238,7 +238,7 @@ static int optee_to_msg_param(struct optee *optee, case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: - if (tee_shm_is_registered(p->u.memref.shm)) + if (tee_shm_is_dynamic(p->u.memref.shm)) rc = to_msg_param_reg_mem(mp, p); else rc = to_msg_param_tmp_mem(mp, p); @@ -679,7 +679,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, sz = tee_shm_get_size(shm); - if (tee_shm_is_registered(shm)) { + if (tee_shm_is_dynamic(shm)) { struct page **pages; u64 *pages_list; size_t page_num; diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 9db571253802..f31e29e8f1ca 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -58,7 +58,7 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) { if (shm->flags & TEE_SHM_POOL) { teedev->pool->ops->free(teedev->pool, shm); - } else if (shm->flags & TEE_SHM_REGISTER) { + } else if (shm->flags & TEE_SHM_DYNAMIC) { int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); if (rc) @@ -139,8 +139,7 @@ err_dev_put: */ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) { - u32 flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_REGISTER | - TEE_SHM_POOL; + u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL; struct tee_device *teedev = ctx->teedev; struct tee_shm *shm; void *ret; @@ -185,7 +184,7 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) */ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size) { - u32 flags = TEE_SHM_MAPPED | TEE_SHM_REGISTER | TEE_SHM_POOL; + u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL; return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1); } @@ -209,7 +208,7 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf); */ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size) { - u32 flags = TEE_SHM_MAPPED | TEE_SHM_PRIV | TEE_SHM_POOL; + u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL; return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1); } @@ -306,7 +305,7 @@ err_dev_put: struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, unsigned long addr, size_t length) { - u32 flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED | TEE_SHM_REGISTER; + u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC; struct tee_device *teedev 
= ctx->teedev; struct tee_shm *shm; void *ret; @@ -350,7 +349,7 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length) { - u32 flags = TEE_SHM_REGISTER | TEE_SHM_KERNEL_MAPPED; + u32 flags = TEE_SHM_DYNAMIC; return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1); } @@ -394,7 +393,7 @@ int tee_shm_get_fd(struct tee_shm *shm) { int fd; - if (!(shm->flags & TEE_SHM_DMA_BUF)) + if (shm->id < 0) return -EINVAL; /* matched by tee_shm_put() in tee_shm_op_release() */ @@ -424,7 +423,7 @@ EXPORT_SYMBOL_GPL(tee_shm_free); */ int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa) { - if (!(shm->flags & TEE_SHM_MAPPED)) + if (!shm->kaddr) return -EINVAL; /* Check that we're in the range of the shm */ if ((char *)va < (char *)shm->kaddr) @@ -446,7 +445,7 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa); */ int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va) { - if (!(shm->flags & TEE_SHM_MAPPED)) + if (!shm->kaddr) return -EINVAL; /* Check that we're in the range of the shm */ if (pa < shm->paddr) @@ -474,7 +473,7 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va); */ void *tee_shm_get_va(struct tee_shm *shm, size_t offs) { - if (!(shm->flags & TEE_SHM_MAPPED)) + if (!shm->kaddr) return ERR_PTR(-EINVAL); if (offs >= shm->size) return ERR_PTR(-EINVAL); @@ -549,7 +548,7 @@ void tee_shm_put(struct tee_shm *shm) * the refcount_inc() in tee_shm_get_from_id() never starts * from 0. */ - if (shm->flags & TEE_SHM_DMA_BUF) + if (shm->id >= 0) idr_remove(&teedev->idr, shm->id); do_release = true; } diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index 71e0f8ae69aa..058bfbac657a 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -30,7 +30,7 @@ static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, * This is from a static shared memory pool so no need to register * each chunk, and no need to unregister later either. */ - shm->flags &= ~TEE_SHM_REGISTER; + shm->flags &= ~TEE_SHM_DYNAMIC; return 0; } diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index a3b663ef0694..911cad324acc 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -20,14 +20,11 @@ * specific TEE driver. 
*/ -#define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */ -#define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */ -#define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */ -#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */ -#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ -#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ -#define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ -#define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */ +#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */ + /* in secure world */ +#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */ +#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */ +#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */ struct device; struct tee_device; @@ -280,13 +277,13 @@ struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length); /** - * tee_shm_is_registered() - Check if shared memory object in registered in TEE + * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind * @shm: Shared memory handle - * @returns true if object is registered in TEE + * @returns true if object is dynamic shared memory */ -static inline bool tee_shm_is_registered(struct tee_shm *shm) +static inline bool tee_shm_is_dynamic(struct tee_shm *shm) { - return shm && (shm->flags & TEE_SHM_REGISTER); + return shm && (shm->flags & TEE_SHM_DYNAMIC); } /** -- cgit v1.2.3-71-gd317 From a257cacc38718c83cee003487e03197f237f5c3f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 15 Feb 2022 13:41:02 +0100 Subject: asm-generic: Define CONFIG_HAVE_FUNCTION_DESCRIPTORS Replace HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR by a config option named CONFIG_HAVE_FUNCTION_DESCRIPTORS and use it instead of 'dereference_function_descriptor' macro to know whether an arch has function descriptors. To limit churn in one of the following patches, use an #ifdef/#else construct with empty first part instead of an #ifndef in asm-generic/sections.h On powerpc, make sure the config option matches the ABI used by the compiler with a BUILD_BUG_ON() and add missing _CALL_ELF=2 when calling 'sparse' so that sparse sees the same piece of code as GCC. 
And include a helper to check whether an arch has function descriptors or not : have_function_descriptors() Signed-off-by: Christophe Leroy Reviewed-by: Kees Cook Reviewed-by: Nicholas Piggin Acked-by: Helge Deller Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/4a0f11fb0ea74a3197bc44dd7ba25e53a24fd03d.1644928018.git.christophe.leroy@csgroup.eu --- arch/Kconfig | 3 +++ arch/ia64/Kconfig | 1 + arch/ia64/include/asm/sections.h | 2 -- arch/parisc/Kconfig | 1 + arch/parisc/include/asm/sections.h | 2 -- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/sections.h | 2 -- arch/powerpc/kernel/ptrace/ptrace.c | 6 ++++++ include/asm-generic/sections.h | 8 +++++++- include/linux/kallsyms.h | 2 +- 10 files changed, 20 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 678a80713b21..fe24174cb63c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -205,6 +205,9 @@ config HAVE_FUNCTION_ERROR_INJECTION config HAVE_NMI bool +config HAVE_FUNCTION_DESCRIPTORS + bool + config TRACE_IRQFLAGS_SUPPORT bool diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index a7e01573abd8..da85c3b23b16 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -35,6 +35,7 @@ config IA64 select HAVE_SETUP_PER_CPU_AREA select TTY select HAVE_ARCH_TRACEHOOK + select HAVE_FUNCTION_DESCRIPTORS select HAVE_VIRT_CPU_ACCOUNTING select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE select VIRT_TO_BUS diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h index 35f24e52149a..2460d365a057 100644 --- a/arch/ia64/include/asm/sections.h +++ b/arch/ia64/include/asm/sections.h @@ -27,8 +27,6 @@ extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_b extern char __start_unwind[], __end_unwind[]; extern char __start_ivt_text[], __end_ivt_text[]; -#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1 - #undef dereference_function_descriptor static inline void *dereference_function_descriptor(void *ptr) { diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 43c1c880def6..82e7ab1a9764 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -69,6 +69,7 @@ config PARISC select HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS select TRACE_IRQFLAGS_SUPPORT + select HAVE_FUNCTION_DESCRIPTORS if 64BIT help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/include/asm/sections.h b/arch/parisc/include/asm/sections.h index bb52aea0cb21..c8092e4d94de 100644 --- a/arch/parisc/include/asm/sections.h +++ b/arch/parisc/include/asm/sections.h @@ -9,8 +9,6 @@ extern char __alt_instructions[], __alt_instructions_end[]; #ifdef CONFIG_64BIT -#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1 - #undef dereference_function_descriptor void *dereference_function_descriptor(void *); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b779603978e1..a0c9cd0bbc85 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -202,6 +202,7 @@ config PPC select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_DESCRIPTORS if PPC64 && !CPU_LITTLE_ENDIAN select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index baca39f4c6d3..7728a7a146c3 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -56,8 +56,6 @@ static 
inline int overlaps_kernel_text(unsigned long start, unsigned long end) #ifdef PPC64_ELF_ABI_v1 -#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1 - #undef dereference_function_descriptor static inline void *dereference_function_descriptor(void *ptr) { diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index c43f77e2ac31..1212a812a7ab 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -445,4 +445,10 @@ void __init pt_regs_check(void) * real registers. */ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long)); + +#ifdef PPC64_ELF_ABI_v1 + BUILD_BUG_ON(!IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS)); +#else + BUILD_BUG_ON(IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS)); +#endif } diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 690f741764e1..3ef83e1aebee 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -59,11 +59,17 @@ extern char __noinstr_text_start[], __noinstr_text_end[]; extern __visible const void __nosave_begin, __nosave_end; /* Function descriptor handling (if any). Override in asm/sections.h */ -#ifndef dereference_function_descriptor +#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS +#else #define dereference_function_descriptor(p) ((void *)(p)) #define dereference_kernel_function_descriptor(p) ((void *)(p)) #endif +static inline bool have_function_descriptors(void) +{ + return IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS); +} + /** * memory_contains - checks if an object is contained within a memory region * @begin: virtual address of the beginning of the memory region diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 4176c7eca7b5..ce1bd2fbf23e 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -48,7 +48,7 @@ static inline int is_ksym_addr(unsigned long addr) static inline void *dereference_symbol_descriptor(void *ptr) { -#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR +#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS struct module *mod; ptr = dereference_kernel_function_descriptor(ptr); -- cgit v1.2.3-71-gd317 From ba2689234be92024e5635d30fe744f4853ad97db Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 18 Nov 2021 13:59:46 +0000 Subject: arm64: entry: Add vectors that have the bhb mitigation sequences Some CPUs affected by Spectre-BHB need a sequence of branches, or a firmware call to be run before any indirect branch. This needs to go in the vectors. No CPU needs both. While this can be patched in, it would run on all CPUs as there is a single set of vectors. If only one part of a big/little combination is affected, the unaffected CPUs have to run the mitigation too. Create extra vectors that include the sequence. Subsequent patches will allow affected CPUs to select this set of vectors. Later patches will modify the loop count to match what the CPU requires. 
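For reference, the loop sequence added in this patch boils down to a burst of taken branches that scrubs the branch history, followed by a speculation barrier. A simplified stand-alone rendering of the __mitigate_spectre_bhb_loop macro from the hunk below (x18 merely stands in for the scratch register the macro takes as an argument, and #32 is the initial count before later patches tune it per CPU):

	mov	x18, #32		// loop counter
1:	b	. + 4			// a taken branch, fills branch history
	subs	x18, x18, #1
	b.ne	1b			// loop back until the counter hits zero
	sb				// speculation barrier once done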
Reviewed-by: Catalin Marinas Signed-off-by: James Morse --- arch/arm64/include/asm/assembler.h | 24 +++++++++++++++++ arch/arm64/include/asm/vectors.h | 34 ++++++++++++++++++++++++ arch/arm64/kernel/entry.S | 53 +++++++++++++++++++++++++++++++------- arch/arm64/kernel/proton-pack.c | 16 ++++++++++++ include/linux/arm-smccc.h | 5 ++++ 5 files changed, 123 insertions(+), 9 deletions(-) create mode 100644 arch/arm64/include/asm/vectors.h (limited to 'include/linux') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index e8bd0af0141c..046c38ee2841 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -850,4 +850,28 @@ alternative_endif #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */ + .macro __mitigate_spectre_bhb_loop tmp +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + mov \tmp, #32 +.Lspectre_bhb_loop\@: + b . + 4 + subs \tmp, \tmp, #1 + b.ne .Lspectre_bhb_loop\@ + sb +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + .endm + + /* Save/restores x0-x3 to the stack */ + .macro __mitigate_spectre_bhb_fw +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + stp x0, x1, [sp, #-16]! + stp x2, x3, [sp, #-16]! + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 +alternative_cb smccc_patch_fw_mitigation_conduit + nop // Patched to SMC/HVC #0 +alternative_cb_end + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + .endm #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h new file mode 100644 index 000000000000..bac53fad037d --- /dev/null +++ b/arch/arm64/include/asm/vectors.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 ARM Ltd. + */ +#ifndef __ASM_VECTORS_H +#define __ASM_VECTORS_H + +/* + * Note: the order of this enum corresponds to two arrays in entry.S: + * tramp_vecs and __bp_harden_el1_vectors. By default the canonical + * 'full fat' vectors are used directly. + */ +enum arm64_bp_harden_el1_vectors { +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + /* + * Perform the BHB loop mitigation, before branching to the canonical + * vectors. + */ + EL1_VECTOR_BHB_LOOP, + + /* + * Make the SMC call for firmware mitigation, before branching to the + * canonical vectors. + */ + EL1_VECTOR_BHB_FW, +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + + /* + * Remap the kernel before branching to the canonical vectors. + */ + EL1_VECTOR_KPTI, +}; + +#endif /* __ASM_VECTORS_H */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9c4ff75f983e..2ceb0c3647b4 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -646,13 +646,26 @@ alternative_else_nop_endif sub \dst, \dst, PAGE_SIZE .endm - .macro tramp_ventry, vector_start, regsize, kpti + +#define BHB_MITIGATION_NONE 0 +#define BHB_MITIGATION_LOOP 1 +#define BHB_MITIGATION_FW 2 + + .macro tramp_ventry, vector_start, regsize, kpti, bhb .align 7 1: .if \regsize == 64 msr tpidrro_el0, x30 // Restored in kernel_ventry .endif + .if \bhb == BHB_MITIGATION_LOOP + /* + * This sequence must appear before the first indirect branch. i.e. the + * ret out of tramp_ventry. It appears here because x30 is free. 
+ */ + __mitigate_spectre_bhb_loop x30 + .endif // \bhb == BHB_MITIGATION_LOOP + .if \kpti == 1 /* * Defend against branch aliasing attacks by pushing a dummy @@ -680,6 +693,15 @@ alternative_else_nop_endif ldr x30, =vectors .endif // \kpti == 1 + .if \bhb == BHB_MITIGATION_FW + /* + * The firmware sequence must appear before the first indirect branch. + * i.e. the ret out of tramp_ventry. But it also needs the stack to be + * mapped to save/restore the registers the SMC clobbers. + */ + __mitigate_spectre_bhb_fw + .endif // \bhb == BHB_MITIGATION_FW + add x30, x30, #(1b - \vector_start + 4) ret .org 1b + 128 // Did we overflow the ventry slot? @@ -687,6 +709,9 @@ alternative_else_nop_endif .macro tramp_exit, regsize = 64 adr x30, tramp_vectors +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + add x30, x30, SZ_4K +#endif msr vbar_el1, x30 ldr lr, [sp, #S_LR] tramp_unmap_kernel x29 @@ -698,26 +723,32 @@ alternative_else_nop_endif sb .endm - .macro generate_tramp_vector, kpti + .macro generate_tramp_vector, kpti, bhb .Lvector_start\@: .space 0x400 .rept 4 - tramp_ventry .Lvector_start\@, 64, \kpti + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb .endr .rept 4 - tramp_ventry .Lvector_start\@, 32, \kpti + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb .endr .endm #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 /* * Exception vectors trampoline. + * The order must match __bp_harden_el1_vectors and the + * arm64_bp_harden_el1_vectors enum. */ .pushsection ".entry.tramp.text", "ax" .align 11 SYM_CODE_START_NOALIGN(tramp_vectors) - generate_tramp_vector kpti=1 +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE SYM_CODE_END(tramp_vectors) SYM_CODE_START(tramp_exit_native) @@ -744,7 +775,7 @@ SYM_DATA_END(__entry_tramp_data_start) * Exception vectors for spectre mitigations on entry from EL1 when * kpti is not in use. */ - .macro generate_el1_vector + .macro generate_el1_vector, bhb .Lvector_start\@: kernel_ventry 1, t, 64, sync // Synchronous EL1t kernel_ventry 1, t, 64, irq // IRQ EL1t @@ -757,17 +788,21 @@ SYM_DATA_END(__entry_tramp_data_start) kernel_ventry 1, h, 64, error // Error EL1h .rept 4 - tramp_ventry .Lvector_start\@, 64, kpti=0 + tramp_ventry .Lvector_start\@, 64, 0, \bhb .endr .rept 4 - tramp_ventry .Lvector_start\@, 32, kpti=0 + tramp_ventry .Lvector_start\@, 32, 0, \bhb .endr .endm +/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. 
*/ .pushsection ".entry.text", "ax" .align 11 SYM_CODE_START(__bp_harden_el1_vectors) - generate_el1_vector +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_el1_vector bhb=BHB_MITIGATION_LOOP + generate_el1_vector bhb=BHB_MITIGATION_FW +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ SYM_CODE_END(__bp_harden_el1_vectors) .popsection diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 9394f21d7566..6a5eeb8beea3 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -770,3 +770,19 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) return -ENODEV; } } + +/* Patched to NOP when enabled */ +void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, + __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + BUG_ON(nr_inst != 1); +} + +/* Patched to NOP when enabled */ +void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, + __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + BUG_ON(nr_inst != 1); +} diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 63ccb5252190..220c8c60e021 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -92,6 +92,11 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +#define ARM_SMCCC_ARCH_WORKAROUND_3 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x3fff) + #define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ ARM_SMCCC_SMC_32, \ -- cgit v1.2.3-71-gd317 From 08bc13d8efe30258850ecdc5d4f38c0bc07689c0 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 10 Feb 2022 20:12:43 +0100 Subject: ieee80211: use tab to indent struct ieee80211_neighbor_ap_info Somehow spaces were used here, use tab instead. Link: https://lore.kernel.org/r/20220210201242.da8fa2e5ae8d.Ia452db01876e52e815f6337fef437049df0d8bd9@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 60ee7b3f58e7..a2940a853783 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4005,10 +4005,10 @@ static inline bool for_each_element_completed(const struct element *element, #define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40 struct ieee80211_neighbor_ap_info { - u8 tbtt_info_hdr; - u8 tbtt_info_len; - u8 op_class; - u8 channel; + u8 tbtt_info_hdr; + u8 tbtt_info_len; + u8 op_class; + u8 channel; } __packed; enum ieee80211_range_params_max_total_ltf { -- cgit v1.2.3-71-gd317 From d61f4274daa41565b5f9ce589b4ba1239781c139 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 14 Feb 2022 17:29:21 +0100 Subject: ieee80211: add helper to check HE capability element size This element has a very dynamic structure, create a small helper function to validate its size. We're currently checking it in mac80211 in a conversion function, but that's actually slightly buggy. 
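A hypothetical caller (not taken from this patch) would run the check before trusting any of the element's variable-length parts:

#include <linux/ieee80211.h>

static const struct ieee80211_he_cap_elem *
example_get_he_cap(const u8 *data, u8 len)
{
	/* Reject elements shorter than their own declared contents */
	if (!ieee80211_he_capa_size_ok(data, len))
		return NULL;

	return (const struct ieee80211_he_cap_elem *)data;
}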
Link: https://lore.kernel.org/r/20220214172920.750bee9eaf37.Ie18359bd38143b7dc949078f10752413e6d36854@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a2940a853783..dfc84680af82 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -9,7 +9,7 @@ * Copyright (c) 2006, Michael Wu * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (c) 2018 - 2021 Intel Corporation + * Copyright (c) 2018 - 2022 Intel Corporation */ #ifndef LINUX_IEEE80211_H @@ -2338,6 +2338,29 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) return n; } +static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len) +{ + const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data; + u8 needed = sizeof(*he_cap_ie_elem); + + if (len < needed) + return false; + + needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem); + if (len < needed) + return false; + + if (he_cap_ie_elem->phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + if (len < needed + 1) + return false; + needed += ieee80211_he_ppe_size(data[needed], + he_cap_ie_elem->phy_cap_info); + } + + return len >= needed; +} + /* HE Operation defines */ #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007 #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 -- cgit v1.2.3-71-gd317 From cbc1ca0a9d0a54cb8aa7c54c16e71599551e3f74 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Mon, 14 Feb 2022 17:29:51 +0100 Subject: ieee80211: Add EHT (802.11be) definitions Based on Draft P802.11be_D1.4. Signed-off-by: Ilan Peer Link: https://lore.kernel.org/r/20220214173004.928e23cacb2b.Id30a3ef2844b296efbd5486fe1da9ca36a95c5cf@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 299 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 299 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index dfc84680af82..7204bc28ca31 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -1925,6 +1926,111 @@ struct ieee80211_mu_edca_param_set { struct ieee80211_he_mu_edca_param_ac_rec ac_vo; } __packed; +#define IEEE80211_EHT_MCS_NSS_RX 0x0f +#define IEEE80211_EHT_MCS_NSS_TX 0xf0 + +/** + * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20MHz only station max + * supported NSS for per MCS. + * + * For each field below, bits 0 - 3 indicate the maximal number of spatial + * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams + * for Tx. + * + * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 0 - 7. + * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 8 - 9. + * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 10 - 11. 
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 12 - 13. + */ +struct ieee80211_eht_mcs_nss_supp_20mhz_only { + u8 rx_tx_mcs7_max_nss; + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; +}; + +/** + * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except + * 20MHz only stations). + * + * For each field below, bits 0 - 3 indicate the maximal number of spatial + * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams + * for Tx. + * + * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 0 - 9. + * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 10 - 11. + * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 12 - 13. + */ +struct ieee80211_eht_mcs_nss_supp_bw { + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; +}; + +/** + * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data + * + * This structure is the "EHT Capabilities element" fixed fields as + * described in P802.11be_D1.4 section 9.4.2.313. + * + * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP* + * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP* + */ +struct ieee80211_eht_cap_elem_fixed { + u8 mac_cap_info[2]; + u8 phy_cap_info[9]; +} __packed; + +/** + * struct ieee80211_eht_cap_elem - EHT capabilities element + * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed + * @optional: optional parts + */ +struct ieee80211_eht_cap_elem { + struct ieee80211_eht_cap_elem_fixed fixed; + + /* + * Followed by: + * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets. + * EHT PPE Thresholds field: variable length. + */ + u8 optional[]; +} __packed; + +/** + * struct ieee80211_eht_operation - eht operation element + * + * This structure is the "EHT Operation Element" fields as + * described in P802.11be_D1.4 section 9.4.2.311 + * + * FIXME: The spec is unclear how big the fields are, and doesn't + * indicate the "Disabled Subchannel Bitmap Present" in the + * structure (Figure 9-1002a) at all ... 
+ */ +struct ieee80211_eht_operation { + u8 chan_width; + u8 ccfs; + u8 present_bm; + + u8 disable_subchannel_bitmap[]; +} __packed; + +#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x1 + /* 802.11ac VHT Capabilities */ #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 @@ -2129,6 +2235,8 @@ enum ieee80211_client_reg_power { #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL 0x1e + #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe @@ -2622,6 +2730,194 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) #define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0) #define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1) +/* EHT MAC capabilities as defined in P802.11be_D1.4 section 9.4.2.313.2 */ +#define IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS 0x01 +#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02 +#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04 +#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08 +#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10 +#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20 +#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_MASK 0xc0 +#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_3895 0 +#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_7991 1 +#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_11454 2 + +/* EHT PHY capabilities as defined in P802.11be_D1.4 section 9.4.2.313.3 */ +#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02 +#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04 +#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08 +#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x10 +#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x20 +#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x40 + +/* EHT beamformee number of spatial streams <= 80MHz is split */ +#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x80 +#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03 + +#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x1c +#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0xe0 + +#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x07 +#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x38 + +/* EHT number of sounding dimensions for 320MHz is split */ +#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0xc0 +#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x01 +#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x02 +#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x04 +#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x08 +#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x10 +#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x20 +#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x40 +#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x80 + +#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x01 +#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP 0x02 +#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x04 +#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x08 +#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0xf0 + +#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x01 +#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x02 +#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x04 
+#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x08 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x30 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 1 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 2 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 3 + +/* Maximum number of supported EHT LTF is split */ +#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0 +#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07 + +#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78 +#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80 + +#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW 0x01 +#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x02 +#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x04 +#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x08 +#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x10 +#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x20 +#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x40 +#define IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT 0x80 + +#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01 +#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02 + +/* + * EHT operation channel width as defined in P802.11be_D1.4 section 9.4.2.311 + */ +#define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ 1 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ 2 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ 3 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ 4 + +/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */ +static inline u8 +ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap, + const struct ieee80211_eht_cap_elem_fixed *eht_cap) +{ + u8 count = 0; + + /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */ + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G) + return 3; + + /* on 2.4 GHz, these three bits are reserved, so should be 0 */ + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G) + count += 3; + + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) + count += 3; + + if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) + count += 3; + + return count ? count : 4; +} + +/* 802.11be EHT PPE Thresholds */ +#define IEEE80211_EHT_PPE_THRES_NSS_POS 0 +#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0xf +#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x1f0 +#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 3 +#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 9 + +/* + * Calculate 802.11be EHT capabilities IE EHT field size + */ +static inline u8 +ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info) +{ + u32 n; + + if (!(phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT)) + return 0; + + n = hweight16(ppe_thres_hdr & + IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); + n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK); + + /* + * Each pair is 6 bits, and we need to add the 9 "header" bits to the + * total size. 
+ */ + n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 + + IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE; + return DIV_ROUND_UP(n, 8); +} + +static inline bool +ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len) +{ + const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data; + u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed); + + if (len < needed || !he_capa) + return false; + + needed += ieee80211_eht_mcs_nss_size((const void *)he_capa, + (const void *)data); + if (len < needed) + return false; + + if (elem->phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) { + u16 ppe_thres_hdr; + + if (len < needed + sizeof(ppe_thres_hdr)) + return false; + + ppe_thres_hdr = get_unaligned_le16(data + needed); + needed += ieee80211_eht_ppe_size(ppe_thres_hdr, + elem->phy_cap_info); + } + + return len >= needed; +} + +static inline bool +ieee80211_eht_oper_size_ok(const u8 *data, u8 len) +{ + const struct ieee80211_eht_operation *elem = (const void *)data; + u8 needed = sizeof(*elem); + + if (len < needed) + return false; + + if (elem->present_bm & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT) + needed += 2; + + return len >= needed; +} #define LISTEN_INT_USF GENMASK(15, 14) #define LISTEN_INT_UI GENMASK(13, 0) @@ -3077,6 +3373,9 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_SHORT_SSID_LIST = 58, WLAN_EID_EXT_HE_6GHZ_CAPA = 59, WLAN_EID_EXT_UL_MU_POWER_CAPA = 60, + WLAN_EID_EXT_EHT_OPERATION = 106, + WLAN_EID_EXT_EHT_MULTI_LINK = 107, + WLAN_EID_EXT_EHT_CAPABILITY = 108, }; /* Action category code */ -- cgit v1.2.3-71-gd317 From 2a2c86f15e17c5013b9897b67d895e64a25ae3cb Mon Sep 17 00:00:00 2001 From: Mordechay Goodstein Date: Mon, 14 Feb 2022 17:29:52 +0100 Subject: ieee80211: add EHT 1K aggregation definitions We add the fields for parsing the extended ADDBA request/response, and the new 1K maximum aggregation limit for the ADDBA request/response. Adjust drivers to use the proper macro, IEEE80211_MAX_AMPDU_BUF -> IEEE80211_MAX_AMPDU_BUF_HE. 
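To illustrate how the renamed limits line up (a sketch only, not from this patch; the has_eht input is an assumption, since EHT station capability plumbing lands separately):

#include <linux/ieee80211.h>

static u16 example_max_ampdu_buf(bool has_he, bool has_eht)
{
	if (has_eht)
		return IEEE80211_MAX_AMPDU_BUF_EHT;	/* 1024 frames */
	if (has_he)
		return IEEE80211_MAX_AMPDU_BUF_HE;	/* 256 frames */
	return IEEE80211_MAX_AMPDU_BUF_HT;		/* 64 frames */
}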
Signed-off-by: Mordechay Goodstein Link: https://lore.kernel.org/r/20220214173004.b8b447ce95b7.I0ee2554c94e89abc7a752b0f7cc7fd79c273efea@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath11k/mac.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 4 ++-- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 4 ++-- include/linux/ieee80211.h | 6 +++++- net/mac80211/agg-rx.c | 2 +- 5 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 90fcd6adf2d5..bd40f4c1183a 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -8448,7 +8448,7 @@ static int __ath11k_mac_register(struct ath11k *ar) ar->hw->queues = ATH11K_HW_MAX_QUEUES; ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN; ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1; - ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; ar->hw->vif_data_size = sizeof(struct ath11k_vif); ar->hw->sta_data_size = sizeof(struct ath11k_sta); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 87630d38dc52..5da9d98043fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1077,12 +1077,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (!hw) return NULL; - hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; if (cfg->max_tx_agg_size) hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; else - hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; op_mode = hw->priv; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 705f362b8f7b..4cf299ea11f5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -311,8 +311,8 @@ mt7915_init_wiphy(struct ieee80211_hw *hw) struct mt7915_dev *dev = phy->dev; hw->queues = 4; - hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; - hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; + hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; hw->netdev_features = NETIF_F_RXCSUM; hw->radiotap_timestamp.units_pos = diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 7204bc28ca31..a4143466cb32 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1024,6 +1024,8 @@ struct ieee80211_tpc_report_ie { #define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1) #define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1 #define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0) +#define IEEE80211_ADDBA_EXT_BUF_SIZE_MASK GENMASK(7, 5) +#define IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT 10 struct ieee80211_addba_ext_ie { u8 data; @@ -1698,10 +1700,12 @@ struct ieee80211_ht_operation { * A-MPDU buffer sizes * According to HT size varies from 8 to 64 frames * HE adds the ability to have up to 256 frames. + * EHT adds the ability to have up to 1K frames. 
*/ #define IEEE80211_MIN_AMPDU_BUF 0x8 #define IEEE80211_MAX_AMPDU_BUF_HT 0x40 -#define IEEE80211_MAX_AMPDU_BUF 0x100 +#define IEEE80211_MAX_AMPDU_BUF_HE 0x100 +#define IEEE80211_MAX_AMPDU_BUF_EHT 0x400 /* Spatial Multiplexing Power Save Modes (for capability) */ diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 7d2925bb966e..0d2bab9d351c 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -310,7 +310,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, } if (sta->sta.he_cap.has_he) - max_buf_size = IEEE80211_MAX_AMPDU_BUF; + max_buf_size = IEEE80211_MAX_AMPDU_BUF_HE; else max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT; -- cgit v1.2.3-71-gd317 From e6df4ead85d9da1b07dd40bd4c6d2182f3e210c4 Mon Sep 17 00:00:00 2001 From: Zhaoyang Huang Date: Tue, 25 Jan 2022 14:56:58 +0800 Subject: psi: fix possible trigger missing in the window When a new threshold breaching stall happens after a psi event was generated and within the window duration, the new event is not generated because the events are rate-limited to one per window. If after that no new stall is recorded then the event will not be generated even after rate-limiting duration has passed. This is happening because with no new stall, window_update will not be called even though threshold was previously breached. To fix this, record threshold breaching occurrence and generate the event once window duration is passed. Suggested-by: Suren Baghdasaryan Signed-off-by: Zhaoyang Huang Signed-off-by: Peter Zijlstra (Intel) Acked-by: Johannes Weiner Acked-by: Suren Baghdasaryan Link: https://lore.kernel.org/r/1643093818-19835-1-git-send-email-huangzhaoyang@gmail.com --- include/linux/psi_types.h | 3 +++ kernel/sched/psi.c | 46 ++++++++++++++++++++++++++++++---------------- 2 files changed, 33 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index 516c0fe836fd..dc3ec5e4b9ee 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -144,6 +144,9 @@ struct psi_trigger { /* Refcounting to prevent premature destruction */ struct kref refcount; + + /* Deferred event(s) from previous ratelimit window */ + bool pending_event; }; struct psi_group { diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index cfe76f704d8a..e9d623cb8d1b 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -523,7 +523,7 @@ static void init_triggers(struct psi_group *group, u64 now) static u64 update_triggers(struct psi_group *group, u64 now) { struct psi_trigger *t; - bool new_stall = false; + bool update_total = false; u64 *total = group->total[PSI_POLL]; /* @@ -532,24 +532,35 @@ static u64 update_triggers(struct psi_group *group, u64 now) */ list_for_each_entry(t, &group->triggers, node) { u64 growth; + bool new_stall; - /* Check for stall activity */ - if (group->polling_total[t->state] == total[t->state]) - continue; + new_stall = group->polling_total[t->state] != total[t->state]; + /* Check for stall activity or a previous threshold breach */ + if (!new_stall && !t->pending_event) + continue; /* - * Multiple triggers might be looking at the same state, - * remember to update group->polling_total[] once we've - * been through all of them. Also remember to extend the - * polling time if we see new stall activity. + * Check for new stall activity, as well as deferred + * events that occurred in the last window after the + * trigger had already fired (we want to ratelimit + * events without dropping any). 
*/ - new_stall = true; - - /* Calculate growth since last update */ - growth = window_update(&t->win, now, total[t->state]); - if (growth < t->threshold) - continue; - + if (new_stall) { + /* + * Multiple triggers might be looking at the same state, + * remember to update group->polling_total[] once we've + * been through all of them. Also remember to extend the + * polling time if we see new stall activity. + */ + update_total = true; + + /* Calculate growth since last update */ + growth = window_update(&t->win, now, total[t->state]); + if (growth < t->threshold) + continue; + + t->pending_event = true; + } /* Limit event signaling to once per window */ if (now < t->last_event_time + t->win.size) continue; @@ -558,9 +569,11 @@ static u64 update_triggers(struct psi_group *group, u64 now) if (cmpxchg(&t->event, 0, 1) == 0) wake_up_interruptible(&t->event_wait); t->last_event_time = now; + /* Reset threshold breach flag once event got generated */ + t->pending_event = false; } - if (new_stall) + if (update_total) memcpy(group->polling_total, total, sizeof(group->polling_total)); @@ -1125,6 +1138,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, t->last_event_time = 0; init_waitqueue_head(&t->event_wait); kref_init(&t->refcount); + t->pending_event = false; mutex_lock(&group->trigger_lock); -- cgit v1.2.3-71-gd317 From 04d4e665a60902cf36e7ad39af1179cb5df542ad Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 7 Feb 2022 16:59:06 +0100 Subject: sched/isolation: Use single feature type while referring to housekeeping cpumask Refer to housekeeping APIs using single feature types instead of flags. This prevents from passing multiple isolation features at once to housekeeping interfaces, which soon won't be possible anymore as each isolation features will have their own cpumask. Signed-off-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Juri Lelli Reviewed-by: Phil Auld Link: https://lore.kernel.org/r/20220207155910.527133-5-frederic@kernel.org --- arch/x86/kernel/cpu/aperfmperf.c | 6 +++--- arch/x86/kvm/x86.c | 2 +- drivers/base/cpu.c | 2 +- drivers/pci/pci-driver.c | 4 ++-- include/linux/sched/isolation.h | 43 ++++++++++++++++++++-------------------- kernel/cgroup/cpuset.c | 6 +++--- kernel/cpu.c | 4 ++-- kernel/irq/cpuhotplug.c | 4 ++-- kernel/irq/manage.c | 4 ++-- kernel/kthread.c | 4 ++-- kernel/rcu/tasks.h | 2 +- kernel/rcu/tree_plugin.h | 6 +++--- kernel/sched/core.c | 12 +++++------ kernel/sched/fair.c | 10 +++++----- kernel/sched/isolation.c | 32 ++++++++++++++++++++---------- kernel/sched/topology.c | 8 ++++---- kernel/watchdog.c | 2 +- kernel/workqueue.c | 4 ++-- net/core/net-sysfs.c | 4 ++-- 19 files changed, 86 insertions(+), 73 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 22911deacb6e..9ca008f9e9b1 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -91,7 +91,7 @@ unsigned int aperfmperf_get_khz(int cpu) if (!boot_cpu_has(X86_FEATURE_APERFMPERF)) return 0; - if (!housekeeping_cpu(cpu, HK_FLAG_MISC)) + if (!housekeeping_cpu(cpu, HK_TYPE_MISC)) return 0; if (rcu_is_idle_cpu(cpu)) @@ -114,7 +114,7 @@ void arch_freq_prepare_all(void) return; for_each_online_cpu(cpu) { - if (!housekeeping_cpu(cpu, HK_FLAG_MISC)) + if (!housekeeping_cpu(cpu, HK_TYPE_MISC)) continue; if (rcu_is_idle_cpu(cpu)) continue; /* Idle CPUs are completely uninteresting. 
*/ @@ -136,7 +136,7 @@ unsigned int arch_freq_get_on_cpu(int cpu) if (!boot_cpu_has(X86_FEATURE_APERFMPERF)) return 0; - if (!housekeeping_cpu(cpu, HK_FLAG_MISC)) + if (!housekeeping_cpu(cpu, HK_TYPE_MISC)) return 0; if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true)) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9e43d756312f..02a7ac1b6bb2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8769,7 +8769,7 @@ int kvm_arch_init(void *opaque) } if (pi_inject_timer == -1) - pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER); + pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER); #ifdef CONFIG_X86_64 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 5fc258073bc7..2ef23fce0860 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -275,7 +275,7 @@ static ssize_t print_cpus_isolated(struct device *dev, return -ENOMEM; cpumask_andnot(isolated, cpu_possible_mask, - housekeeping_cpumask(HK_FLAG_DOMAIN)); + housekeeping_cpumask(HK_TYPE_DOMAIN)); len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated)); free_cpumask_var(isolated); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 4a5792c82d08..f61c40a47891 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -377,8 +377,8 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, goto out; } cpumask_and(wq_domain_mask, - housekeeping_cpumask(HK_FLAG_WQ), - housekeeping_cpumask(HK_FLAG_DOMAIN)); + housekeeping_cpumask(HK_TYPE_WQ), + housekeeping_cpumask(HK_TYPE_DOMAIN)); cpu = cpumask_any_and(cpumask_of_node(node), wq_domain_mask); diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index cc9f393e2a70..8c15abd67aed 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -5,54 +5,55 @@ #include #include -enum hk_flags { - HK_FLAG_TIMER = 1, - HK_FLAG_RCU = (1 << 1), - HK_FLAG_MISC = (1 << 2), - HK_FLAG_SCHED = (1 << 3), - HK_FLAG_TICK = (1 << 4), - HK_FLAG_DOMAIN = (1 << 5), - HK_FLAG_WQ = (1 << 6), - HK_FLAG_MANAGED_IRQ = (1 << 7), - HK_FLAG_KTHREAD = (1 << 8), +enum hk_type { + HK_TYPE_TIMER, + HK_TYPE_RCU, + HK_TYPE_MISC, + HK_TYPE_SCHED, + HK_TYPE_TICK, + HK_TYPE_DOMAIN, + HK_TYPE_WQ, + HK_TYPE_MANAGED_IRQ, + HK_TYPE_KTHREAD, + HK_TYPE_MAX }; #ifdef CONFIG_CPU_ISOLATION DECLARE_STATIC_KEY_FALSE(housekeeping_overridden); -extern int housekeeping_any_cpu(enum hk_flags flags); -extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); -extern bool housekeeping_enabled(enum hk_flags flags); -extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags); -extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags); +extern int housekeeping_any_cpu(enum hk_type type); +extern const struct cpumask *housekeeping_cpumask(enum hk_type type); +extern bool housekeeping_enabled(enum hk_type type); +extern void housekeeping_affine(struct task_struct *t, enum hk_type type); +extern bool housekeeping_test_cpu(int cpu, enum hk_type type); extern void __init housekeeping_init(void); #else -static inline int housekeeping_any_cpu(enum hk_flags flags) +static inline int housekeeping_any_cpu(enum hk_type type) { return smp_processor_id(); } -static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags) +static inline const struct cpumask *housekeeping_cpumask(enum hk_type type) { return cpu_possible_mask; } -static inline bool housekeeping_enabled(enum hk_flags flags) +static inline bool housekeeping_enabled(enum 
hk_type type) { return false; } static inline void housekeeping_affine(struct task_struct *t, - enum hk_flags flags) { } + enum hk_type type) { } static inline void housekeeping_init(void) { } #endif /* CONFIG_CPU_ISOLATION */ -static inline bool housekeeping_cpu(int cpu, enum hk_flags flags) +static inline bool housekeeping_cpu(int cpu, enum hk_type type) { #ifdef CONFIG_CPU_ISOLATION if (static_branch_unlikely(&housekeeping_overridden)) - return housekeeping_test_cpu(cpu, flags); + return housekeeping_test_cpu(cpu, type); #endif return true; } diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index dc653ab26e50..e4e18a2cb404 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -803,7 +803,7 @@ static int generate_sched_domains(cpumask_var_t **domains, update_domain_attr_tree(dattr, &top_cpuset); } cpumask_and(doms[0], top_cpuset.effective_cpus, - housekeeping_cpumask(HK_FLAG_DOMAIN)); + housekeeping_cpumask(HK_TYPE_DOMAIN)); goto done; } @@ -833,7 +833,7 @@ static int generate_sched_domains(cpumask_var_t **domains, if (!cpumask_empty(cp->cpus_allowed) && !(is_sched_load_balance(cp) && cpumask_intersects(cp->cpus_allowed, - housekeeping_cpumask(HK_FLAG_DOMAIN)))) + housekeeping_cpumask(HK_TYPE_DOMAIN)))) continue; if (root_load_balance && @@ -922,7 +922,7 @@ restart: if (apn == b->pn) { cpumask_or(dp, dp, b->effective_cpus); - cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN)); + cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN)); if (dattr) update_domain_attr_tree(dattr + nslot, b); diff --git a/kernel/cpu.c b/kernel/cpu.c index 407a2568f35e..f39eb0b52dfe 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1488,8 +1488,8 @@ int freeze_secondary_cpus(int primary) cpu_maps_update_begin(); if (primary == -1) { primary = cpumask_first(cpu_online_mask); - if (!housekeeping_cpu(primary, HK_FLAG_TIMER)) - primary = housekeeping_any_cpu(HK_FLAG_TIMER); + if (!housekeeping_cpu(primary, HK_TYPE_TIMER)) + primary = housekeeping_any_cpu(HK_TYPE_TIMER); } else { if (!cpu_online(primary)) primary = cpumask_first(cpu_online_mask); diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index 39a41c56ad4f..1ed2b1739363 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c @@ -176,10 +176,10 @@ static bool hk_should_isolate(struct irq_data *data, unsigned int cpu) { const struct cpumask *hk_mask; - if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) + if (!housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) return false; - hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ); + hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask)) return false; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index f23ffd30385b..c03f71d5ec10 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -247,13 +247,13 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, * online. 
*/ if (irqd_affinity_is_managed(data) && - housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) { + housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) { const struct cpumask *hk_mask, *prog_mask; static DEFINE_RAW_SPINLOCK(tmp_mask_lock); static struct cpumask tmp_mask; - hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ); + hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); raw_spin_lock(&tmp_mask_lock); cpumask_and(&tmp_mask, mask, hk_mask); diff --git a/kernel/kthread.c b/kernel/kthread.c index 38c6dd822da8..d100d5a15b38 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -356,7 +356,7 @@ static int kthread(void *_create) * back to default in case they have been changed. */ sched_setscheduler_nocheck(current, SCHED_NORMAL, ¶m); - set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_KTHREAD)); + set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD)); /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); @@ -722,7 +722,7 @@ int kthreadd(void *unused) /* Setup a clean context for our children to inherit. */ set_task_comm(tsk, "kthreadd"); ignore_signals(tsk); - set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD)); + set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD)); set_mems_allowed(node_states[N_MEMORY]); current->flags |= PF_NOFREEZE; diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 84f1d91604cc..6093b200dff7 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -492,7 +492,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) struct rcu_tasks *rtp = arg; /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ - housekeeping_affine(current, HK_FLAG_RCU); + housekeeping_affine(current, HK_TYPE_RCU); WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start! /* diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index c5b45c2f68a1..65f25a32f6d7 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1214,9 +1214,9 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) if ((mask & leaf_node_cpu_bit(rnp, cpu)) && cpu != outgoingcpu) cpumask_set_cpu(cpu, cm); - cpumask_and(cm, cm, housekeeping_cpumask(HK_FLAG_RCU)); + cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU)); if (cpumask_weight(cm) == 0) - cpumask_copy(cm, housekeeping_cpumask(HK_FLAG_RCU)); + cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU)); set_cpus_allowed_ptr(t, cm); free_cpumask_var(cm); } @@ -1291,7 +1291,7 @@ static void rcu_bind_gp_kthread(void) { if (!tick_nohz_full_enabled()) return; - housekeeping_affine(current, HK_FLAG_RCU); + housekeeping_affine(current, HK_TYPE_RCU); } /* Record the current task on dyntick-idle entry. 
*/ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b2226922206d..1e08b02e0cd5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1025,13 +1025,13 @@ int get_nohz_timer_target(void) struct sched_domain *sd; const struct cpumask *hk_mask; - if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) { + if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) { if (!idle_cpu(cpu)) return cpu; default_cpu = cpu; } - hk_mask = housekeeping_cpumask(HK_FLAG_TIMER); + hk_mask = housekeeping_cpumask(HK_TYPE_TIMER); rcu_read_lock(); for_each_domain(cpu, sd) { @@ -1047,7 +1047,7 @@ int get_nohz_timer_target(void) } if (default_cpu == -1) - default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER); + default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER); cpu = default_cpu; unlock: rcu_read_unlock(); @@ -5371,7 +5371,7 @@ static void sched_tick_start(int cpu) int os; struct tick_work *twork; - if (housekeeping_cpu(cpu, HK_FLAG_TICK)) + if (housekeeping_cpu(cpu, HK_TYPE_TICK)) return; WARN_ON_ONCE(!tick_work_cpu); @@ -5392,7 +5392,7 @@ static void sched_tick_stop(int cpu) struct tick_work *twork; int os; - if (housekeeping_cpu(cpu, HK_FLAG_TICK)) + if (housekeeping_cpu(cpu, HK_TYPE_TICK)) return; WARN_ON_ONCE(!tick_work_cpu); @@ -9251,7 +9251,7 @@ void __init sched_init_smp(void) mutex_unlock(&sched_domains_mutex); /* Move init over to a non-isolated CPU */ - if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) + if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) BUG(); current->flags &= ~PF_NO_SETAFFINITY; sched_init_granularity(); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 11a72e1b3b2c..dcbd3110c687 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -10337,7 +10337,7 @@ static inline int on_null_domain(struct rq *rq) * - When one of the busy CPUs notice that there may be an idle rebalancing * needed, they will kick the idle load balancer, which then does idle * load balancing for all the idle CPUs. - * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED not set + * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED not set * anywhere yet. */ @@ -10346,7 +10346,7 @@ static inline int find_new_ilb(void) int ilb; const struct cpumask *hk_mask; - hk_mask = housekeeping_cpumask(HK_FLAG_MISC); + hk_mask = housekeeping_cpumask(HK_TYPE_MISC); for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) { @@ -10362,7 +10362,7 @@ static inline int find_new_ilb(void) /* * Kick a CPU to do the nohz balancing, if it is time for it. We pick any - * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one). + * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one). */ static void kick_ilb(unsigned int flags) { @@ -10575,7 +10575,7 @@ void nohz_balance_enter_idle(int cpu) return; /* Spare idle load balancing on CPUs that don't want to be disturbed: */ - if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) + if (!housekeeping_cpu(cpu, HK_TYPE_SCHED)) return; /* @@ -10791,7 +10791,7 @@ static void nohz_newidle_balance(struct rq *this_rq) * This CPU doesn't want to be disturbed by scheduler * housekeeping */ - if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) + if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED)) return; /* Will wake up very soon. 
No time for doing anything else*/ diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 7f06eaf12818..a735d9e229dd 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -9,23 +9,35 @@ */ #include "sched.h" +enum hk_flags { + HK_FLAG_TIMER = BIT(HK_TYPE_TIMER), + HK_FLAG_RCU = BIT(HK_TYPE_RCU), + HK_FLAG_MISC = BIT(HK_TYPE_MISC), + HK_FLAG_SCHED = BIT(HK_TYPE_SCHED), + HK_FLAG_TICK = BIT(HK_TYPE_TICK), + HK_FLAG_DOMAIN = BIT(HK_TYPE_DOMAIN), + HK_FLAG_WQ = BIT(HK_TYPE_WQ), + HK_FLAG_MANAGED_IRQ = BIT(HK_TYPE_MANAGED_IRQ), + HK_FLAG_KTHREAD = BIT(HK_TYPE_KTHREAD), +}; + DEFINE_STATIC_KEY_FALSE(housekeeping_overridden); EXPORT_SYMBOL_GPL(housekeeping_overridden); static cpumask_var_t housekeeping_mask; static unsigned int housekeeping_flags; -bool housekeeping_enabled(enum hk_flags flags) +bool housekeeping_enabled(enum hk_type type) { - return !!(housekeeping_flags & flags); + return !!(housekeeping_flags & BIT(type)); } EXPORT_SYMBOL_GPL(housekeeping_enabled); -int housekeeping_any_cpu(enum hk_flags flags) +int housekeeping_any_cpu(enum hk_type type) { int cpu; if (static_branch_unlikely(&housekeeping_overridden)) { - if (housekeeping_flags & flags) { + if (housekeeping_flags & BIT(type)) { cpu = sched_numa_find_closest(housekeeping_mask, smp_processor_id()); if (cpu < nr_cpu_ids) return cpu; @@ -37,27 +49,27 @@ int housekeeping_any_cpu(enum hk_flags flags) } EXPORT_SYMBOL_GPL(housekeeping_any_cpu); -const struct cpumask *housekeeping_cpumask(enum hk_flags flags) +const struct cpumask *housekeeping_cpumask(enum hk_type type) { if (static_branch_unlikely(&housekeeping_overridden)) - if (housekeeping_flags & flags) + if (housekeeping_flags & BIT(type)) return housekeeping_mask; return cpu_possible_mask; } EXPORT_SYMBOL_GPL(housekeeping_cpumask); -void housekeeping_affine(struct task_struct *t, enum hk_flags flags) +void housekeeping_affine(struct task_struct *t, enum hk_type type) { if (static_branch_unlikely(&housekeeping_overridden)) - if (housekeeping_flags & flags) + if (housekeeping_flags & BIT(type)) set_cpus_allowed_ptr(t, housekeeping_mask); } EXPORT_SYMBOL_GPL(housekeeping_affine); -bool housekeeping_test_cpu(int cpu, enum hk_flags flags) +bool housekeeping_test_cpu(int cpu, enum hk_type type) { if (static_branch_unlikely(&housekeeping_overridden)) - if (housekeeping_flags & flags) + if (housekeeping_flags & BIT(type)) return cpumask_test_cpu(cpu, housekeeping_mask); return true; } diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 5db322c9cb3f..32841c6741d1 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1366,7 +1366,7 @@ static void asym_cpu_capacity_scan(void) list_for_each_entry(entry, &asym_cap_list, link) cpumask_clear(cpu_capacity_span(entry)); - for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_FLAG_DOMAIN)) + for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) asym_cpu_capacity_update_data(cpu); list_for_each_entry_safe(entry, next, &asym_cap_list, link) { @@ -2440,7 +2440,7 @@ int sched_init_domains(const struct cpumask *cpu_map) doms_cur = alloc_sched_domains(ndoms_cur); if (!doms_cur) doms_cur = &fallback_doms; - cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN)); + cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN)); err = build_sched_domains(doms_cur[0], NULL); return err; @@ -2529,7 +2529,7 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], if (doms_new) { n = 1; 
cpumask_and(doms_new[0], cpu_active_mask, - housekeeping_cpumask(HK_FLAG_DOMAIN)); + housekeeping_cpumask(HK_TYPE_DOMAIN)); } } else { n = ndoms_new; @@ -2564,7 +2564,7 @@ match1: n = 0; doms_new = &fallback_doms; cpumask_and(doms_new[0], cpu_active_mask, - housekeeping_cpumask(HK_FLAG_DOMAIN)); + housekeeping_cpumask(HK_TYPE_DOMAIN)); } /* Build new domains: */ diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 99afb88d2e85..9166220457bc 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -848,7 +848,7 @@ void __init lockup_detector_init(void) pr_info("Disabling watchdog on nohz_full cores by default\n"); cpumask_copy(&watchdog_cpumask, - housekeeping_cpumask(HK_FLAG_TIMER)); + housekeeping_cpumask(HK_TYPE_TIMER)); if (!watchdog_nmi_probe()) nmi_watchdog_available = true; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 61ed310621ea..52e9abbb7759 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6011,8 +6011,8 @@ void __init workqueue_init_early(void) BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); - cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_WQ)); - cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN)); + cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ)); + cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN)); pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index ed8da7b8d35b..7ceb3460161b 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -839,8 +839,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, } if (!cpumask_empty(mask)) { - cpumask_and(mask, mask, housekeeping_cpumask(HK_FLAG_DOMAIN)); - cpumask_and(mask, mask, housekeeping_cpumask(HK_FLAG_WQ)); + cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_DOMAIN)); + cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_WQ)); if (cpumask_empty(mask)) { free_cpumask_var(mask); return -EINVAL; -- cgit v1.2.3-71-gd317 From fe65deb56e552a8c9bf7f27860dbdeac12a36116 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 14 Feb 2022 01:57:16 +0900 Subject: jump_label: Avoid unneeded casts in STATIC_KEY_INIT_{TRUE,FALSE} Commit 3821fd35b58d ("jump_label: Reduce the size of struct static_key") introduced the union to struct static_key. It is more natural to set JUMP_TYPE_* to the .type field without casting.
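For illustration, a reduced stand-alone sketch of the two initializer styles (simplified types and a stand-in MY_TYPE_TRUE constant, not the kernel's exact definitions):

	struct key {
		int enabled;
		union {
			void *entries;
			unsigned long type;
		};
	};

	#define MY_TYPE_TRUE 1UL

	/* old style: cast the constant into the pointer member */
	static struct key k_old = { .enabled = 1, { .entries = (void *)MY_TYPE_TRUE } };

	/* new style: initialize the matching union member directly */
	static struct key k_new = { .enabled = 1, { .type = MY_TYPE_TRUE } };

Where void * and unsigned long have the same size, as the jump label code assumes, both forms produce the same object; the second simply avoids the integer-to-pointer cast.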
Signed-off-by: Masahiro Yamada Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20220213165717.2354046-1-masahiroy@kernel.org --- include/linux/jump_label.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 48b9b2a82767..6924e6837e6d 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -251,10 +251,10 @@ extern void static_key_disable_cpuslocked(struct static_key *key); */ #define STATIC_KEY_INIT_TRUE \ { .enabled = { 1 }, \ - { .entries = (void *)JUMP_TYPE_TRUE } } + { .type = JUMP_TYPE_TRUE } } #define STATIC_KEY_INIT_FALSE \ { .enabled = { 0 }, \ - { .entries = (void *)JUMP_TYPE_FALSE } } + { .type = JUMP_TYPE_FALSE } } #else /* !CONFIG_JUMP_LABEL */ -- cgit v1.2.3-71-gd317 From cd27ccfc727e99352321c0c75012ab9c5a90321e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 14 Feb 2022 01:57:17 +0900 Subject: jump_label: Refactor #ifdef of struct static_key Move #ifdef CONFIG_JUMP_LABEL inside the struct static_key. Signed-off-by: Masahiro Yamada Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20220213165717.2354046-2-masahiroy@kernel.org --- include/linux/jump_label.h | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 6924e6837e6d..107751cc047b 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -82,10 +82,9 @@ extern bool static_key_initialized; "%s(): static key '%pS' used before call to jump_label_init()", \ __func__, (key)) -#ifdef CONFIG_JUMP_LABEL - struct static_key { atomic_t enabled; +#ifdef CONFIG_JUMP_LABEL /* * Note: * To make anonymous unions work with old compilers, the static @@ -104,13 +103,9 @@ struct static_key { struct jump_entry *entries; struct static_key_mod *next; }; +#endif /* CONFIG_JUMP_LABEL */ }; -#else -struct static_key { - atomic_t enabled; -}; -#endif /* CONFIG_JUMP_LABEL */ #endif /* __ASSEMBLY__ */ #ifdef CONFIG_JUMP_LABEL -- cgit v1.2.3-71-gd317 From 8c30e2d81bfddc5ab9f6b04db1c0f7d6ca7bdf46 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 11 Feb 2022 10:46:40 +0100 Subject: fbdev: Don't sort deferred-I/O pages by default Fbdev's deferred I/O sorts all dirty pages by default, which incurs a significant overhead. Make the sorting step optional and update the few drivers that require it. Use a FIFO list by default. Most fbdev drivers with deferred I/O build a bounding rectangle around the dirty pages or simply flush the whole screen. The only two affected DRM drivers, generic fbdev and vmwgfx, both use a bounding rectangle. In those cases, the exact order of the pages doesn't matter. The other drivers look at the page index or handle pages one-by-one. The patch sets the sort_pagelist flag for those, even though some of them would probably work correctly without sorting. Driver maintainers should update their driver accordingly. Sorting pages by memory offset for deferred I/O performs an implicit bubble-sort step on the list of dirty pages. The algorithm goes through the list of dirty pages and inserts each new page according to its index field. Even worse, list traversal always starts at the first entry. As video memory is most likely updated scanline by scanline, the algorithm traverses through the complete list for each updated page. For example, with 1024x768x32bpp each page covers exactly one scanline. 
Writing a single screen update from top to bottom requires updating 768 pages. With an average list length of 384 entries, a screen update creates (768 * 384 =) 294912 compare operations. Fix this by making the sorting step opt-in and updating the few drivers that require it. All other drivers work with unsorted page lists. Pages are appended to the list. Therefore, in the common case of writing the framebuffer top to bottom, pages are still sorted by offset, which may have a positive effect on performance. Playing a video [1] in mplayer's benchmark mode shows the difference (i7-4790, FullHD, simpledrm, kernel with debugging). mplayer -benchmark -nosound -vo fbdev ./big_buck_bunny_720p_stereo.ogg With sorted page lists: BENCHMARKs: VC: 32.960s VO: 73.068s A: 0.000s Sys: 2.413s = 108.441s BENCHMARK%: VC: 30.3947% VO: 67.3802% A: 0.0000% Sys: 2.2251% = 100.0000% With unsorted page lists: BENCHMARKs: VC: 31.005s VO: 42.889s A: 0.000s Sys: 2.256s = 76.150s BENCHMARK%: VC: 40.7156% VO: 56.3219% A: 0.0000% Sys: 2.9625% = 100.0000% VC shows the overhead of video decoding, VO shows the overhead of the video output. Using unsorted page lists reduces the benchmark's run time by ~32s/~25%. v2: * Make sorted pagelists the special case (Sam) * Comment on drivers' use of pagelist (Sam) * Warn about the overhead in comment Signed-off-by: Thomas Zimmermann Acked-by: Sam Ravnborg Acked-by: Andy Shevchenko Link: https://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_stereo.ogg # [1] Link: https://patchwork.freedesktop.org/patch/msgid/20220211094640.21632-3-tzimmermann@suse.de --- drivers/staging/fbtft/fbtft-core.c | 1 + drivers/video/fbdev/broadsheetfb.c | 1 + drivers/video/fbdev/core/fb_defio.c | 24 +++++++++++++++++------- drivers/video/fbdev/metronomefb.c | 1 + drivers/video/fbdev/udlfb.c | 1 + include/linux/fb.h | 1 + 6 files changed, 22 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index f2684d2d6851..4a35347b3020 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -654,6 +654,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, fbops->fb_blank = fbtft_fb_blank; fbdefio->delay = HZ / fps; + fbdefio->sort_pagelist = true; fbdefio->deferred_io = fbtft_deferred_io; fb_deferred_io_init(info); diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index fd66f4d4a621..b9054f658838 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ -1059,6 +1059,7 @@ static const struct fb_ops broadsheetfb_ops = { static struct fb_deferred_io broadsheetfb_defio = { .delay = HZ/4, + .sort_pagelist = true, .deferred_io = broadsheetfb_dpy_deferred_io, }; diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index 169a81c8172b..98b0f23bf5e2 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -96,7 +96,7 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf) struct page *page = vmf->page; struct fb_info *info = vmf->vma->vm_private_data; struct fb_deferred_io *fbdefio = info->fbdefio; - struct page *cur; + struct list_head *pos = &fbdefio->pagelist; /* this is a callback we get when userspace first tries to write to the page. we schedule a workqueue.
that workqueue @@ -137,14 +137,24 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf) if (!list_empty(&page->lru)) goto page_already_added; - /* we loop through the pagelist before adding in order - to keep the pagelist sorted */ - list_for_each_entry(cur, &fbdefio->pagelist, lru) { - if (cur->index > page->index) - break; + if (unlikely(fbdefio->sort_pagelist)) { + /* + * We loop through the pagelist before adding in order to + * keep the pagelist sorted. This has significant overhead + * of O(n^2) with n being the number of written pages. If + * possible, drivers should try to work with unsorted page + * lists instead. + */ + struct page *cur; + + list_for_each_entry(cur, &fbdefio->pagelist, lru) { + if (cur->index > page->index) + break; + } + pos = &cur->lru; } - list_add_tail(&page->lru, &cur->lru); + list_add_tail(&page->lru, pos); page_already_added: mutex_unlock(&fbdefio->lock); diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c index 952826557a0c..af858dd23ea6 100644 --- a/drivers/video/fbdev/metronomefb.c +++ b/drivers/video/fbdev/metronomefb.c @@ -568,6 +568,7 @@ static const struct fb_ops metronomefb_ops = { static struct fb_deferred_io metronomefb_defio = { .delay = HZ, + .sort_pagelist = true, .deferred_io = metronomefb_dpy_deferred_io, }; diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index b9cdd02c1000..184bb8433b78 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -980,6 +980,7 @@ static int dlfb_ops_open(struct fb_info *info, int user) if (fbdefio) { fbdefio->delay = DL_DEFIO_WRITE_DELAY; + fbdefio->sort_pagelist = true; fbdefio->deferred_io = dlfb_dpy_deferred_io; } diff --git a/include/linux/fb.h b/include/linux/fb.h index 9a14f3f8a329..39baa9a70779 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -204,6 +204,7 @@ struct fb_pixmap { struct fb_deferred_io { /* delay between mkwrite and deferred handler */ unsigned long delay; + bool sort_pagelist; /* sort pagelist by offset */ struct mutex lock; /* mutex that protects the page list */ struct list_head pagelist; /* list of touched pages */ /* callback */ -- cgit v1.2.3-71-gd317 From e1be43d9b5d0d1310dbd90185a8e5c7145dde40f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sat, 18 Sep 2021 15:17:53 -0700 Subject: overflow: Implement size_t saturating arithmetic helpers In order to perform more open-coded replacements of common allocation size arithmetic, the kernel needs saturating (SIZE_MAX) helpers for multiplication, addition, and subtraction. For example, it is common in allocators, especially on realloc, to add to an existing size: p = krealloc(map->patch, sizeof(struct reg_sequence) * (map->patch_regs + num_regs), GFP_KERNEL); There is no existing saturating replacement for this calculation, and just leaving the addition open coded inside array_size() could potentially overflow as well. For example, an overflow in an expression for a size_t argument might wrap to zero: array_size(anything, something_at_size_max + 1) == 0 Introduce size_mul(), size_add(), and size_sub() helpers that implicitly promote arguments to size_t and saturated calculations for use in allocations. With these helpers it is also possible to redefine array_size(), array3_size(), flex_array_size(), and struct_size() in terms of the new helpers. As with the check_*_overflow() helpers, the new helpers use __must_check, though what is really desired is a way to make sure that assignment is only to a size_t lvalue. 
Without this, it's still possible to introduce overflow/underflow via type conversion (i.e. from size_t to int). Enforcing this will currently need to be left to static analysis or future use of -Wconversion. Additionally update the overflow unit tests to force runtime evaluation for the pathological cases. Cc: Rasmus Villemoes Cc: Gustavo A. R. Silva Cc: Nathan Chancellor Cc: Jason Gunthorpe Cc: Nick Desaulniers Cc: Leon Romanovsky Cc: Keith Busch Cc: Len Baker Signed-off-by: Kees Cook --- Documentation/process/deprecated.rst | 20 ++++++- include/linux/overflow.h | 110 ++++++++++++++++++++++------------- lib/test_overflow.c | 98 +++++++++++++++++++++++++++++++ 3 files changed, 184 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst index 388cb19f5dbb..a6e36d9c3d14 100644 --- a/Documentation/process/deprecated.rst +++ b/Documentation/process/deprecated.rst @@ -71,6 +71,9 @@ Instead, the 2-factor form of the allocator should be used:: foo = kmalloc_array(count, size, GFP_KERNEL); +Specifically, kmalloc() can be replaced with kmalloc_array(), and +kzalloc() can be replaced with kcalloc(). + If no 2-factor form is available, the saturate-on-overflow helpers should be used:: @@ -91,9 +94,20 @@ Instead, use the helper:: array usage and switch to a `flexible array member <#zero-length-and-one-element-arrays>`_ instead. -See array_size(), array3_size(), and struct_size(), -for more details as well as the related check_add_overflow() and -check_mul_overflow() family of functions. +For other calculations, please compose the use of the size_mul(), +size_add(), and size_sub() helpers. For example, in the case of:: + + foo = krealloc(current_size + chunk_size * (count - 3), GFP_KERNEL); + +Instead, use the helpers:: + + foo = krealloc(size_add(current_size, + size_mul(chunk_size, + size_sub(count, 3))), GFP_KERNEL); + +For more details, also see array3_size() and flex_array_size(), +as well as the related check_mul_overflow(), check_add_overflow(), +check_sub_overflow(), and check_shl_overflow() family of functions. simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull() ---------------------------------------------------------------------- diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 4669632bd72b..59d7228104d0 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -118,81 +118,94 @@ static inline bool __must_check __must_check_overflow(bool overflow) })) /** - * array_size() - Calculate size of 2-dimensional array. - * - * @a: dimension one - * @b: dimension two + * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX * - * Calculates size of 2-dimensional array: @a * @b. + * @factor1: first factor + * @factor2: second factor * - * Returns: number of bytes needed to represent the array or SIZE_MAX on - * overflow. + * Returns: calculate @factor1 * @factor2, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. The + * lvalue must be size_t to avoid implicit type conversion. */ -static inline __must_check size_t array_size(size_t a, size_t b) +static inline size_t __must_check size_mul(size_t factor1, size_t factor2) { size_t bytes; - if (check_mul_overflow(a, b, &bytes)) + if (check_mul_overflow(factor1, factor2, &bytes)) return SIZE_MAX; return bytes; } /** - * array3_size() - Calculate size of 3-dimensional array. 
+ * size_add() - Calculate size_t addition with saturation at SIZE_MAX * - * @a: dimension one - * @b: dimension two - * @c: dimension three - * - * Calculates size of 3-dimensional array: @a * @b * @c. + * @addend1: first addend + * @addend2: second addend * - * Returns: number of bytes needed to represent the array or SIZE_MAX on - * overflow. + * Returns: calculate @addend1 + @addend2, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. The + * lvalue must be size_t to avoid implicit type conversion. */ -static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +static inline size_t __must_check size_add(size_t addend1, size_t addend2) { size_t bytes; - if (check_mul_overflow(a, b, &bytes)) - return SIZE_MAX; - if (check_mul_overflow(bytes, c, &bytes)) + if (check_add_overflow(addend1, addend2, &bytes)) return SIZE_MAX; return bytes; } -/* - * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for - * struct_size() below. +/** + * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX + * + * @minuend: value to subtract from + * @subtrahend: value to subtract from @minuend + * + * Returns: calculate @minuend - @subtrahend, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. For + * composition with the size_add() and size_mul() helpers, neither + * argument may be SIZE_MAX (or the result with be forced to SIZE_MAX). + * The lvalue must be size_t to avoid implicit type conversion. */ -static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) +static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) { size_t bytes; - if (check_mul_overflow(a, b, &bytes)) - return SIZE_MAX; - if (check_add_overflow(bytes, c, &bytes)) + if (minuend == SIZE_MAX || subtrahend == SIZE_MAX || + check_sub_overflow(minuend, subtrahend, &bytes)) return SIZE_MAX; return bytes; } /** - * struct_size() - Calculate size of structure with trailing array. - * @p: Pointer to the structure. - * @member: Name of the array member. - * @count: Number of elements in the array. + * array_size() - Calculate size of 2-dimensional array. * - * Calculates size of memory needed for structure @p followed by an - * array of @count number of @member elements. + * @a: dimension one + * @b: dimension two * - * Return: number of bytes needed or SIZE_MAX on overflow. + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. */ -#define struct_size(p, member, count) \ - __ab_c_size(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member),\ - sizeof(*(p))) +#define array_size(a, b) size_mul(a, b) + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +#define array3_size(a, b, c) size_mul(size_mul(a, b), c) /** * flex_array_size() - Calculate size of a flexible array member @@ -208,7 +221,22 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) * Return: number of bytes needed or SIZE_MAX on overflow. 
*/ #define flex_array_size(p, member, count) \ - array_size(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member)) + size_mul(count, \ + sizeof(*(p)->member) + __must_be_array((p)->member)) + +/** + * struct_size() - Calculate size of structure with trailing flexible array. + * + * @p: Pointer to the structure. + * @member: Name of the array member. + * @count: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @count number of @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, count) \ + size_add(sizeof(*(p)), flex_array_size(p, member, count)) #endif /* __LINUX_OVERFLOW_H */ diff --git a/lib/test_overflow.c b/lib/test_overflow.c index cea37ae82615..712fb2351c27 100644 --- a/lib/test_overflow.c +++ b/lib/test_overflow.c @@ -594,12 +594,110 @@ static int __init test_overflow_allocation(void) return err; } +struct __test_flex_array { + unsigned long flags; + size_t count; + unsigned long data[]; +}; + +static int __init test_overflow_size_helpers(void) +{ + struct __test_flex_array *obj; + int count = 0; + int err = 0; + int var; + +#define check_one_size_helper(expected, func, args...) ({ \ + bool __failure = false; \ + size_t _r; \ + \ + _r = func(args); \ + if (_r != (expected)) { \ + pr_warn("expected " #func "(" #args ") " \ + "to return %zu but got %zu instead\n", \ + (size_t)(expected), _r); \ + __failure = true; \ + } \ + count++; \ + __failure; \ +}) + + var = 4; + err |= check_one_size_helper(20, size_mul, var++, 5); + err |= check_one_size_helper(20, size_mul, 4, var++); + err |= check_one_size_helper(0, size_mul, 0, 3); + err |= check_one_size_helper(0, size_mul, 3, 0); + err |= check_one_size_helper(6, size_mul, 2, 3); + err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1); + err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3); + err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3); + + var = 4; + err |= check_one_size_helper(9, size_add, var++, 5); + err |= check_one_size_helper(9, size_add, 4, var++); + err |= check_one_size_helper(9, size_add, 9, 0); + err |= check_one_size_helper(9, size_add, 0, 9); + err |= check_one_size_helper(5, size_add, 2, 3); + err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1); + err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3); + err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3); + + var = 4; + err |= check_one_size_helper(1, size_sub, var--, 3); + err |= check_one_size_helper(1, size_sub, 4, var--); + err |= check_one_size_helper(1, size_sub, 3, 2); + err |= check_one_size_helper(9, size_sub, 9, 0); + err |= check_one_size_helper(SIZE_MAX, size_sub, 9, -3); + err |= check_one_size_helper(SIZE_MAX, size_sub, 0, 9); + err |= check_one_size_helper(SIZE_MAX, size_sub, 2, 3); + err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0); + err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10); + err |= check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX); + err |= check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX); + err |= check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1); + err |= check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3); + err |= check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3); + + var = 4; + err |= check_one_size_helper(4 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + err |= check_one_size_helper(5 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + err |= 
check_one_size_helper(0, flex_array_size, obj, data, 0); + err |= check_one_size_helper(sizeof(*obj->data), + flex_array_size, obj, data, 1); + err |= check_one_size_helper(7 * sizeof(*obj->data), + flex_array_size, obj, data, 7); + err |= check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, -1); + err |= check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, SIZE_MAX - 4); + + var = 4; + err |= check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), + struct_size, obj, data, var++); + err |= check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), + struct_size, obj, data, var++); + err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0); + err |= check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), + struct_size, obj, data, 1); + err |= check_one_size_helper(SIZE_MAX, + struct_size, obj, data, -3); + err |= check_one_size_helper(SIZE_MAX, + struct_size, obj, data, SIZE_MAX - 3); + + pr_info("%d overflow size helper tests finished\n", count); + + return err; +} + static int __init test_module_init(void) { int err = 0; err |= test_overflow_calculation(); err |= test_overflow_shift(); + err |= test_overflow_size_helpers(); err |= test_overflow_allocation(); if (err) { -- cgit v1.2.3-71-gd317 From 230f6fa2c1db6a3f3e668cfe95995ac8e6eee212 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 9 Feb 2022 16:40:41 -0800 Subject: overflow: Provide constant expression struct_size There have been cases where struct_size() (or flex_array_size()) needs to be calculated for an initializer, which requires it be a constant expression. This is possible when the "count" argument is a constant expression, so provide this ability for the helpers. Cc: Gustavo A. R. Silva Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Rasmus Villemoes Signed-off-by: Kees Cook Reviewed-by: Gustavo A. R. Silva Tested-by: Gustavo A. R. Silva Link: https://lore.kernel.org/lkml/20220210010407.GA701603@embeddedor --- include/linux/overflow.h | 10 +++++++--- lib/test_overflow.c | 26 +++++++++++++++++--------- 2 files changed, 24 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 59d7228104d0..f1221d11f8e5 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -4,6 +4,7 @@ #include #include +#include /* * We need to compute the minimum and maximum values representable in a given @@ -221,8 +222,9 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) * Return: number of bytes needed or SIZE_MAX on overflow. */ #define flex_array_size(p, member, count) \ - size_mul(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member)) + __builtin_choose_expr(__is_constexpr(count), \ + (count) * sizeof(*(p)->member) + __must_be_array((p)->member), \ + size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member))) /** * struct_size() - Calculate size of structure with trailing flexible array. @@ -237,6 +239,8 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) * Return: number of bytes needed or SIZE_MAX on overflow. 
*/ #define struct_size(p, member, count) \ - size_add(sizeof(*(p)), flex_array_size(p, member, count)) + __builtin_choose_expr(__is_constexpr(count), \ + sizeof(*(p)) + flex_array_size(p, member, count), \ + size_add(sizeof(*(p)), flex_array_size(p, member, count))) #endif /* __LINUX_OVERFLOW_H */ diff --git a/lib/test_overflow.c b/lib/test_overflow.c index 712fb2351c27..f6530fce799d 100644 --- a/lib/test_overflow.c +++ b/lib/test_overflow.c @@ -602,10 +602,18 @@ struct __test_flex_array { static int __init test_overflow_size_helpers(void) { + /* Make sure struct_size() can be used in a constant expression. */ + u8 ce_array[struct_size((struct __test_flex_array *)0, data, 55)]; struct __test_flex_array *obj; int count = 0; int err = 0; int var; + volatile int unconst = 0; + + /* Verify constant expression against runtime version. */ + var = 55; + OPTIMIZER_HIDE_VAR(var); + err |= sizeof(ce_array) != struct_size(obj, data, var); #define check_one_size_helper(expected, func, args...) ({ \ bool __failure = false; \ @@ -663,28 +671,28 @@ static int __init test_overflow_size_helpers(void) flex_array_size, obj, data, var++); err |= check_one_size_helper(5 * sizeof(*obj->data), flex_array_size, obj, data, var++); - err |= check_one_size_helper(0, flex_array_size, obj, data, 0); + err |= check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst); err |= check_one_size_helper(sizeof(*obj->data), - flex_array_size, obj, data, 1); + flex_array_size, obj, data, 1 + unconst); err |= check_one_size_helper(7 * sizeof(*obj->data), - flex_array_size, obj, data, 7); + flex_array_size, obj, data, 7 + unconst); err |= check_one_size_helper(SIZE_MAX, - flex_array_size, obj, data, -1); + flex_array_size, obj, data, -1 + unconst); err |= check_one_size_helper(SIZE_MAX, - flex_array_size, obj, data, SIZE_MAX - 4); + flex_array_size, obj, data, SIZE_MAX - 4 + unconst); var = 4; err |= check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), struct_size, obj, data, var++); err |= check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), struct_size, obj, data, var++); - err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0); + err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst); err |= check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), - struct_size, obj, data, 1); + struct_size, obj, data, 1 + unconst); err |= check_one_size_helper(SIZE_MAX, - struct_size, obj, data, -3); + struct_size, obj, data, -3 + unconst); err |= check_one_size_helper(SIZE_MAX, - struct_size, obj, data, SIZE_MAX - 3); + struct_size, obj, data, SIZE_MAX - 3 + unconst); pr_info("%d overflow size helper tests finished\n", count); -- cgit v1.2.3-71-gd317 From 28db4711bf48303814dcfd8d41a41106e90bc374 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Feb 2022 11:05:38 +0100 Subject: blk-mq: remove the request_queue argument to blk_insert_cloned_request The request must be submitted to the queue it was allocated for, so remove the extra request_queue argument. 
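Concretely, the old signature left room for a mismatch that the new one makes unrepresentable (a sketch; "some_other_q" is a hypothetical caller bug, not code from the tree):

	/* before: nothing forced the queue argument to match the request */
	ret = blk_insert_cloned_request(some_other_q, clone);	/* compiles, but wrong */

	/* after: the helper always derives the queue from clone->q itself */
	ret = blk_insert_cloned_request(clone);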
Signed-off-by: Christoph Hellwig Reviewed-by: Mike Snitzer Link: https://lore.kernel.org/r/20220215100540.3892965-4-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 9 ++++----- drivers/md/dm-rq.c | 2 +- include/linux/blk-mq.h | 3 +-- 3 files changed, 6 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index fc132933397f..886836a54064 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2843,11 +2843,11 @@ void blk_mq_submit_bio(struct bio *bio) #ifdef CONFIG_BLK_MQ_STACKING /** * blk_insert_cloned_request - Helper for stacking drivers to submit a request - * @q: the queue to submit the request * @rq: the request being queued */ -blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) +blk_status_t blk_insert_cloned_request(struct request *rq) { + struct request_queue *q = rq->q; unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); blk_status_t ret; @@ -2881,8 +2881,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * return BLK_STS_IOERR; } - if (rq->q->disk && - should_fail_request(rq->q->disk->part0, blk_rq_bytes(rq))) + if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) return BLK_STS_IOERR; if (blk_crypto_insert_cloned_request(rq)) @@ -2895,7 +2894,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * * bypass a potential scheduler on the bottom device for * insert. */ - blk_mq_run_dispatch_ops(rq->q, + blk_mq_run_dispatch_ops(q, ret = blk_mq_request_issue_directly(rq, true)); if (ret) blk_account_io_done(rq, ktime_get_ns()); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 579ab6183d4d..2fcc9b7f391b 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -311,7 +311,7 @@ static blk_status_t dm_dispatch_clone_request(struct request *clone, struct requ clone->rq_flags |= RQF_IO_STAT; clone->start_time_ns = ktime_get_ns(); - r = blk_insert_cloned_request(clone->q, clone); + r = blk_insert_cloned_request(clone); if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE) /* must complete clone in terms of original request */ dm_complete_request(rq, r); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index d319ffa59354..3a41d50b85d3 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -952,8 +952,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int (*bio_ctr)(struct bio *, struct bio *, void *), void *data); void blk_rq_unprep_clone(struct request *rq); -blk_status_t blk_insert_cloned_request(struct request_queue *q, - struct request *rq); +blk_status_t blk_insert_cloned_request(struct request *rq); struct rq_map_data { struct page **pages; -- cgit v1.2.3-71-gd317 From 76792055c4c8b2472ca1ae48e0ddaf8497529f08 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Feb 2022 10:45:10 +0100 Subject: block: add a ->free_disk method Add a method to notify the driver that the gendisk is about to be freed. This allows drivers to tie the lifetime of their private data to that of the gendisk and thus deal with device removal races without expensive synchronization and boilerplate code. A new flag is added so that ->free_disk is only called after a successful call to add_disk, which significantly simplifies the error handling path during probing. 
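As a sketch, a driver would typically tie its private data to the gendisk like this (a hypothetical "mydev" driver; only the parts relevant to ->free_disk are shown):

	#include <linux/blkdev.h>
	#include <linux/slab.h>

	static void mydev_free_disk(struct gendisk *disk)
	{
		/* runs from disk_release(), i.e. on the final put of the gendisk */
		kfree(disk->private_data);
	}

	static const struct block_device_operations mydev_fops = {
		.owner		= THIS_MODULE,
		.free_disk	= mydev_free_disk,
	};

Because the callback is gated on GD_ADDED, a probe error path that never reached a successful add_disk() can still free its private data directly, without risking a double free.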
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220215094514.3828912-2-hch@lst.de Signed-off-by: Jens Axboe --- block/genhd.c | 5 +++++ include/linux/blkdev.h | 2 ++ 2 files changed, 7 insertions(+) (limited to 'include/linux') diff --git a/block/genhd.c b/block/genhd.c index 9589d1d59afa..e351fac41bf2 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -526,6 +526,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, disk_update_readahead(disk); disk_add_events(disk); + set_bit(GD_ADDED, &disk->state); return 0; out_unregister_bdi: @@ -1119,6 +1120,10 @@ static void disk_release(struct device *dev) xa_destroy(&disk->part_tbl); disk->queue->disk = NULL; blk_put_queue(disk->queue); + + if (test_bit(GD_ADDED, &disk->state) && disk->fops->free_disk) + disk->fops->free_disk(disk); + iput(disk->part0->bd_inode); /* frees the disk */ } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3bfc75a2a450..f757f9c2871f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -146,6 +146,7 @@ struct gendisk { #define GD_READ_ONLY 1 #define GD_DEAD 2 #define GD_NATIVE_CAPACITY 3 +#define GD_ADDED 4 struct mutex open_mutex; /* open/close mutex */ unsigned open_partitions; /* number of open partitions */ @@ -1464,6 +1465,7 @@ struct block_device_operations { void (*unlock_native_capacity) (struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); int (*set_read_only)(struct block_device *bdev, bool ro); + void (*free_disk)(struct gendisk *disk); /* this callback is with swap_lock and sometimes page table lock held */ void (*swap_slot_free_notify) (struct block_device *, unsigned long); int (*report_zones)(struct gendisk *, sector_t sector, -- cgit v1.2.3-71-gd317 From 5224f79096170bf7b92cc8fe42a12f44b91e5f62 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 14 Feb 2022 19:11:44 -0600 Subject: treewide: Replace zero-length arrays with flexible-array members MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a regular need in the kernel to provide a way to declare having a dynamically sized set of trailing elements in a structure. Kernel code should always use “flexible array members”[1] for these cases. The older style of one-element or zero-length arrays should no longer be used[2]. This code was transformed with the help of Coccinelle: (next-20220214$ spatch --jobs $(getconf _NPROCESSORS_ONLN) --sp-file script.cocci --include-headers --dir . > output.patch) @@ identifier S, member, array; type T1, T2; @@ struct S { ... T1 member; T2 array[ - 0 ]; }; UAPI and wireless changes were intentionally excluded from this patch and will be sent out separately. [1] https://en.wikipedia.org/wiki/Flexible_array_member [2] https://www.kernel.org/doc/html/v5.16/process/deprecated.html#zero-length-and-one-element-arrays Link: https://github.com/KSPP/linux/issues/78 Reviewed-by: Kees Cook Signed-off-by: Gustavo A. R. 
Silva --- arch/alpha/include/asm/hwrpb.h | 2 +- arch/ia64/include/asm/sal.h | 2 +- arch/s390/include/asm/ccwgroup.h | 2 +- arch/s390/include/asm/chsc.h | 2 +- arch/s390/include/asm/eadm.h | 2 +- arch/s390/include/asm/fcx.h | 4 ++-- arch/s390/include/asm/idals.h | 2 +- arch/s390/include/asm/sclp.h | 2 +- arch/s390/include/asm/sysinfo.h | 6 +++--- arch/sh/include/asm/thread_info.h | 2 +- arch/sparc/include/asm/vio.h | 10 +++++----- arch/um/include/shared/net_kern.h | 2 +- arch/x86/include/asm/microcode_amd.h | 2 +- arch/x86/include/asm/microcode_intel.h | 4 ++-- arch/x86/include/asm/pci.h | 2 +- arch/x86/include/asm/pci_x86.h | 2 +- arch/xtensa/include/asm/bootparam.h | 2 +- drivers/crypto/caam/pdb.h | 2 +- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 2 +- drivers/gpu/drm/nouveau/include/nvfw/hs.h | 2 +- drivers/hwtracing/coresight/coresight-config.h | 2 +- drivers/misc/bcm-vk/bcm_vk.h | 2 +- drivers/misc/habanalabs/include/common/cpucp_if.h | 6 +++--- .../misc/habanalabs/include/gaudi/gaudi_packets.h | 4 ++-- drivers/misc/habanalabs/include/goya/goya_packets.h | 4 ++-- drivers/net/ethernet/freescale/enetc/enetc_hw.h | 2 +- drivers/net/ethernet/i825xx/sun3_82586.h | 2 +- drivers/net/ethernet/marvell/octeontx2/af/npc.h | 6 +++--- drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h | 2 +- drivers/net/ethernet/ti/davinci_mdio.c | 2 +- drivers/scsi/dpt/dpti_i2o.h | 2 +- drivers/scsi/elx/libefc_sli/sli4.h | 20 ++++++++++---------- drivers/scsi/mpi3mr/mpi3mr.h | 2 +- drivers/scsi/qla2xxx/qla_bsg.h | 4 ++-- drivers/scsi/qla2xxx/qla_def.h | 2 +- drivers/scsi/qla2xxx/qla_edif_bsg.h | 4 ++-- drivers/scsi/qla2xxx/qla_fw.h | 2 +- drivers/scsi/qla4xxx/ql4_fw.h | 2 +- drivers/staging/r8188eu/include/rtw_cmd.h | 10 +++++----- drivers/staging/rtl8712/rtl871x_cmd.h | 8 ++++---- drivers/staging/rtl8723bs/include/ieee80211.h | 2 +- drivers/staging/rtl8723bs/include/rtw_cmd.h | 2 +- .../vc04_services/include/linux/raspberrypi/vchiq.h | 2 +- drivers/visorbus/vbuschannel.h | 2 +- fs/cifs/ntlmssp.h | 2 +- fs/ext4/fast_commit.h | 4 ++-- fs/ksmbd/ksmbd_netlink.h | 2 +- fs/ksmbd/ntlmssp.h | 6 +++--- fs/ksmbd/smb2pdu.h | 8 ++++---- fs/ksmbd/transport_rdma.c | 2 +- fs/ksmbd/xattr.h | 2 +- fs/xfs/scrub/attr.h | 2 +- include/asm-generic/tlb.h | 4 ++-- include/linux/greybus/greybus_manifest.h | 4 ++-- include/linux/greybus/hd.h | 2 +- include/linux/greybus/module.h | 2 +- include/linux/i3c/ccc.h | 6 +++--- include/linux/platform_data/brcmfmac.h | 2 +- include/linux/platform_data/cros_ec_commands.h | 2 +- include/net/bluetooth/mgmt.h | 2 +- include/net/ioam6.h | 2 +- include/sound/sof/channel_map.h | 4 ++-- scripts/dtc/libfdt/fdt.h | 4 ++-- sound/soc/intel/atom/sst-mfld-dsp.h | 4 ++-- sound/soc/intel/skylake/skl-topology.h | 2 +- tools/lib/perf/include/perf/event.h | 2 +- 66 files changed, 111 insertions(+), 111 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/include/asm/hwrpb.h b/arch/alpha/include/asm/hwrpb.h index d8180e527a1e..fc76f36265ad 100644 --- a/arch/alpha/include/asm/hwrpb.h +++ b/arch/alpha/include/asm/hwrpb.h @@ -152,7 +152,7 @@ struct memdesc_struct { unsigned long chksum; unsigned long optional_pa; unsigned long numclusters; - struct memclust_struct cluster[0]; + struct memclust_struct cluster[]; }; struct dsr_struct { diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h index 78f4f7b40435..22749a201e92 100644 --- a/arch/ia64/include/asm/sal.h +++ b/arch/ia64/include/asm/sal.h @@ -420,7 +420,7 @@ typedef struct sal_log_processor_info { * The rest of this structure consists of 
variable-length arrays, which can't be * expressed in C. */ - sal_log_mod_error_info_t info[0]; + sal_log_mod_error_info_t info[]; /* * This is what the rest looked like if C supported variable-length arrays: * diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h index aa995d91cd1d..11d2fb3de4f5 100644 --- a/arch/s390/include/asm/ccwgroup.h +++ b/arch/s390/include/asm/ccwgroup.h @@ -25,7 +25,7 @@ struct ccwgroup_device { unsigned int count; struct device dev; struct work_struct ungroup_work; - struct ccw_device *cdev[0]; + struct ccw_device *cdev[]; }; /** diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h index ae4d2549cd67..bb48ea380c0d 100644 --- a/arch/s390/include/asm/chsc.h +++ b/arch/s390/include/asm/chsc.h @@ -63,7 +63,7 @@ struct chsc_pnso_area { struct chsc_header response; u32:32; struct chsc_pnso_naihdr naihdr; - struct chsc_pnso_naid_l2 entries[0]; + struct chsc_pnso_naid_l2 entries[]; } __packed __aligned(PAGE_SIZE); #endif /* _ASM_S390_CHSC_H */ diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index 445fe4c8184a..06f795855af7 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h @@ -78,7 +78,7 @@ struct aob { struct aob_rq_header { struct scm_device *scmdev; - char data[0]; + char data[]; }; struct scm_device { diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h index cff0749e9657..b8a028a36173 100644 --- a/arch/s390/include/asm/fcx.h +++ b/arch/s390/include/asm/fcx.h @@ -214,7 +214,7 @@ struct dcw_intrg_data { u32 :32; u64 time; u64 prog_id; - u8 prog_data[0]; + u8 prog_data[]; } __attribute__ ((packed)); #define DCW_FLAGS_CC (1 << (7 - 1)) @@ -241,7 +241,7 @@ struct dcw { u32 :8; u32 cd_count:8; u32 count; - u8 cd[0]; + u8 cd[]; } __attribute__ ((packed)); #define TCCB_FORMAT_DEFAULT 0x7f diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h index 6fb7aced104a..40eae2c08d61 100644 --- a/arch/s390/include/asm/idals.h +++ b/arch/s390/include/asm/idals.h @@ -108,7 +108,7 @@ clear_normalized_cda(struct ccw1 * ccw) struct idal_buffer { size_t size; size_t page_order; - void *data[0]; + void *data[]; }; /* diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index c68ea35de498..22b3213c6c9d 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -112,7 +112,7 @@ struct zpci_report_error_header { * (OpenCrypto Successful Diagnostics Execution) */ u16 length; /* Length of Subsequent Data (up to 4K – SCLP header */ - u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */ + u8 data[]; /* Subsequent Data passed verbatim to SCLP ET 24 */ } __packed; extern char *sclp_early_sccb; diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index fe7b3f8f0791..ab1c6316055c 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h @@ -67,12 +67,12 @@ struct sysinfo_1_2_2 { unsigned short cpus_configured; unsigned short cpus_standby; unsigned short cpus_reserved; - unsigned short adjustment[0]; + unsigned short adjustment[]; }; struct sysinfo_1_2_2_extension { unsigned int alt_capability; - unsigned short alt_adjustment[0]; + unsigned short alt_adjustment[]; }; struct sysinfo_2_2_1 { @@ -181,7 +181,7 @@ struct sysinfo_15_1_x { unsigned char reserved1; unsigned char mnest; unsigned char reserved2[4]; - union topology_entry tle[0]; + union topology_entry tle[]; }; int stsi(void *sysinfo, int fc, int sel1, int sel2); diff --git a/arch/sh/include/asm/thread_info.h 
b/arch/sh/include/asm/thread_info.h index 598d0184ffea..3a2d50d61fc9 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -33,7 +33,7 @@ struct thread_info { mm_segment_t addr_limit; /* thread address space */ unsigned long previous_sp; /* sp of previous stack in case of nested IRQ stacks */ - __u8 supervisor_stack[0]; + __u8 supervisor_stack[]; }; #endif diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index 8a1a83bbb6d5..2d7bdf665fd3 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -70,7 +70,7 @@ struct vio_dring_register { #define VIO_RX_DRING_DATA 0x0004 u16 resv; u32 num_cookies; - struct ldc_trans_cookie cookies[0]; + struct ldc_trans_cookie cookies[]; }; struct vio_dring_unregister { @@ -161,7 +161,7 @@ struct vio_disk_desc { u64 size; u32 ncookies; u32 resv2; - struct ldc_trans_cookie cookies[0]; + struct ldc_trans_cookie cookies[]; }; #define VIO_DISK_VNAME_LEN 8 @@ -200,13 +200,13 @@ struct vio_disk_devid { u16 resv; u16 type; u32 len; - char id[0]; + char id[]; }; struct vio_disk_efi { u64 lba; u64 len; - char data[0]; + char data[]; }; /* VIO net specific structures and defines */ @@ -246,7 +246,7 @@ struct vio_net_desc { struct vio_dring_hdr hdr; u32 size; u32 ncookies; - struct ldc_trans_cookie cookies[0]; + struct ldc_trans_cookie cookies[]; }; struct vio_net_dext { diff --git a/arch/um/include/shared/net_kern.h b/arch/um/include/shared/net_kern.h index 441a8a309329..67b2e9a1f2e5 100644 --- a/arch/um/include/shared/net_kern.h +++ b/arch/um/include/shared/net_kern.h @@ -39,7 +39,7 @@ struct uml_net_private { void (*add_address)(unsigned char *, unsigned char *, void *); void (*delete_address)(unsigned char *, unsigned char *, void *); - char user[0]; + char user[]; }; struct net_kern_info { diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 7063b5a43220..ac31f9140d07 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -38,7 +38,7 @@ struct microcode_header_amd { struct microcode_amd { struct microcode_header_amd hdr; - unsigned int mpb[0]; + unsigned int mpb[]; }; #define PATCH_MAX_SIZE (3 * PAGE_SIZE) diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h index d85a07d7154f..4c92cea7e4b5 100644 --- a/arch/x86/include/asm/microcode_intel.h +++ b/arch/x86/include/asm/microcode_intel.h @@ -19,7 +19,7 @@ struct microcode_header_intel { struct microcode_intel { struct microcode_header_intel hdr; - unsigned int bits[0]; + unsigned int bits[]; }; /* microcode format is extended from prescott processors */ @@ -33,7 +33,7 @@ struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; - struct extended_signature sigs[0]; + struct extended_signature sigs[]; }; #define DEFAULT_UCODE_DATASIZE (2000) diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index d2c76c8d8cfd..f3fd5928bcbb 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -135,7 +135,7 @@ struct pci_setup_rom { unsigned long bus; unsigned long device; unsigned long function; - uint8_t romdata[0]; + uint8_t romdata[]; }; #endif /* _ASM_X86_PCI_H */ diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 490411dba438..3fb6fc596095 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -87,7 +87,7 @@ struct irq_routing_table { u32 miniport_data; /* Crap */ u8 rfu[11]; u8 checksum; /* 
Modulo 256 checksum must give 0 */ - struct irq_info slots[0]; + struct irq_info slots[]; } __attribute__((packed)); extern unsigned int pcibios_irq_mask; diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h index 892aab399ac8..6333bd1eb9d2 100644 --- a/arch/xtensa/include/asm/bootparam.h +++ b/arch/xtensa/include/asm/bootparam.h @@ -34,7 +34,7 @@ typedef struct bp_tag { unsigned short id; /* tag id */ unsigned short size; /* size of this record excluding the structure*/ - unsigned long data[0]; /* data */ + unsigned long data[]; /* data */ } bp_tag_t; struct bp_meminfo { diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h index 8ccc22075043..4b1bcf53f7ac 100644 --- a/drivers/crypto/caam/pdb.h +++ b/drivers/crypto/caam/pdb.h @@ -144,7 +144,7 @@ struct ipsec_encap_pdb { }; u32 spi; u32 ip_hdr_len; - u32 ip_hdr[0]; + u32 ip_hdr[]; }; /** diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 1a1edae67e4e..3acee0060e23 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -51,7 +51,7 @@ struct __guc_ads_blob { struct guc_gt_system_info system_info; struct guc_engine_usage engine_usage; /* From here on, location is dynamic! Refer to above diagram. */ - struct guc_mmio_reg regset[0]; + struct guc_mmio_reg regset[]; } __packed; static u32 guc_ads_regset_size(struct intel_guc *guc) diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h index 64d0d32200c2..b53bbc4cd130 100644 --- a/drivers/gpu/drm/nouveau/include/nvfw/hs.h +++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h @@ -23,7 +23,7 @@ struct nvfw_hs_load_header { u32 data_dma_base; u32 data_size; u32 num_apps; - u32 apps[0]; + u32 apps[]; }; const struct nvfw_hs_load_header * diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h index 9bd44b940add..2e1670523461 100644 --- a/drivers/hwtracing/coresight/coresight-config.h +++ b/drivers/hwtracing/coresight/coresight-config.h @@ -231,7 +231,7 @@ struct cscfg_config_csdev { bool enabled; struct list_head node; int nr_feat; - struct cscfg_feature_csdev *feats_csdev[0]; + struct cscfg_feature_csdev *feats_csdev[]; }; /** diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index a1338f375589..25d51222eedf 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -311,7 +311,7 @@ struct bcm_vk_peer_log { u32 wr_idx; u32 buf_size; u32 mask; - char data[0]; + char data[]; }; /* max buf size allowed */ diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index 737c39f33f05..f9c4acc9bf5a 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -540,19 +540,19 @@ struct cpucp_packet { struct cpucp_unmask_irq_arr_packet { struct cpucp_packet cpucp_pkt; __le32 length; - __le32 irqs[0]; + __le32 irqs[]; }; struct cpucp_nic_status_packet { struct cpucp_packet cpucp_pkt; __le32 length; - __le32 data[0]; + __le32 data[]; }; struct cpucp_array_data_packet { struct cpucp_packet cpucp_pkt; __le32 length; - __le32 data[0]; + __le32 data[]; }; enum cpucp_packet_rc { diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h index 6e097ace2e96..66fc083a7c6a 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h +++ 
b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h @@ -54,7 +54,7 @@ struct gaudi_packet { /* The rest of the packet data follows. Use the corresponding * packet_XXX struct to deference the data, based on packet type */ - u8 contents[0]; + u8 contents[]; }; struct packet_nop { @@ -75,7 +75,7 @@ struct packet_wreg32 { struct packet_wreg_bulk { __le32 size64; __le32 ctl; - __le64 values[0]; /* data starts here */ + __le64 values[]; /* data starts here */ }; #define GAUDI_PKT_LONG_CTL_OP_SHIFT 20 diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h index ef54bad20509..50ce5175b63a 100644 --- a/drivers/misc/habanalabs/include/goya/goya_packets.h +++ b/drivers/misc/habanalabs/include/goya/goya_packets.h @@ -62,7 +62,7 @@ struct goya_packet { /* The rest of the packet data follows. Use the corresponding * packet_XXX struct to deference the data, based on packet type */ - u8 contents[0]; + u8 contents[]; }; struct packet_nop { @@ -86,7 +86,7 @@ struct packet_wreg32 { struct packet_wreg_bulk { __le32 size64; __le32 ctl; - __le64 values[0]; /* data starts here */ + __le64 values[]; /* data starts here */ }; struct packet_msg_long { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 1514e6a4a3ff..ce5b677e8c2f 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -881,7 +881,7 @@ struct sgcl_data { u32 bth; u32 ct; u32 cte; - struct sgce sgcl[0]; + struct sgce sgcl[]; }; #define ENETC_CBDR_FMI_MR BIT(0) diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h index 79aef681ac85..451cb3d26cb5 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.h +++ b/drivers/net/ethernet/i825xx/sun3_82586.h @@ -250,7 +250,7 @@ struct mcsetup_cmd_struct unsigned short cmd_cmd; unsigned short cmd_link; unsigned short mc_cnt; /* number of bytes in the MC-List */ - unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */ + unsigned char mc_list[][6]; /* pointer to 6 bytes entries */ }; /* diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 77fd39e2c8db..9b6e587e78b4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -455,7 +455,7 @@ struct npc_coalesced_kpu_prfl { u8 name[NPC_NAME_LEN]; /* KPU Profile name */ u64 version; /* KPU firmware/profile version */ u8 num_prfl; /* No of NPC profiles. */ - u16 prfl_sz[0]; + u16 prfl_sz[]; }; struct npc_mcam_kex { @@ -482,7 +482,7 @@ struct npc_kpu_fwdata { * struct npc_kpu_profile_cam[entries]; * struct npc_kpu_profile_action[entries]; */ - u8 data[0]; + u8 data[]; } __packed; struct npc_lt_def { @@ -572,7 +572,7 @@ struct npc_kpu_profile_fwdata { * Custom KPU CAM and ACTION configuration entries. 
* struct npc_kpu_fwdata kpu[kpus]; */ - u8 data[0]; + u8 data[]; } __packed; struct rvu_npc_mcam_rule { diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h index b70ee8200e15..6459dd3feb37 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h @@ -2470,6 +2470,6 @@ struct nvm_meta_bin_t { u32 version; #define NVM_META_BIN_VERSION 1 u32 num_options; - u32 options[0]; + u32 options[]; }; #endif diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index a4efd5e35158..fce2626e34fa 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -70,7 +70,7 @@ struct davinci_mdio_regs { #define USERACCESS_DATA (0xffff) u32 physel; - } user[0]; + } user[]; }; static const struct mdio_platform_data default_pdata = { diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h index bf0daeeb50a9..e1fbbf55c09d 100644 --- a/drivers/scsi/dpt/dpti_i2o.h +++ b/drivers/scsi/dpt/dpti_i2o.h @@ -123,7 +123,7 @@ struct i2o_sys_tbl u32 change_ind; u32 reserved2; u32 reserved3; - struct i2o_sys_tbl_entry iops[0]; + struct i2o_sys_tbl_entry iops[]; }; /* diff --git a/drivers/scsi/elx/libefc_sli/sli4.h b/drivers/scsi/elx/libefc_sli/sli4.h index ee2a9e65a88d..38af166cc786 100644 --- a/drivers/scsi/elx/libefc_sli/sli4.h +++ b/drivers/scsi/elx/libefc_sli/sli4.h @@ -609,7 +609,7 @@ struct sli4_rqst_cmn_create_cq_v2 { __le16 cqe_count; __le16 rsvd30; __le32 rsvd32; - struct sli4_dmaaddr page_phys_addr[0]; + struct sli4_dmaaddr page_phys_addr[]; }; enum sli4_create_cqset_e { @@ -634,7 +634,7 @@ struct sli4_rqst_cmn_create_cq_set_v0 { __le16 num_cq_req; __le16 dw6w1_flags; __le16 eq_id[16]; - struct sli4_dmaaddr page_phys_addr[0]; + struct sli4_dmaaddr page_phys_addr[]; }; /* CQE count */ @@ -764,7 +764,7 @@ struct sli4_rqst_cmn_create_mq_ext { __le32 dw7_val; __le32 dw8_flags; __le32 rsvd36; - struct sli4_dmaaddr page_phys_addr[0]; + struct sli4_dmaaddr page_phys_addr[]; }; struct sli4_rsp_cmn_create_mq_ext { @@ -802,7 +802,7 @@ struct sli4_rqst_cmn_create_cq_v0 { __le32 dw6_flags; __le32 rsvd28; __le32 rsvd32; - struct sli4_dmaaddr page_phys_addr[0]; + struct sli4_dmaaddr page_phys_addr[]; }; enum sli4_create_rq_e { @@ -887,7 +887,7 @@ struct sli4_rqst_rq_create_v2 { __le16 base_cq_id; __le16 rsvd26; __le32 rsvd42; - struct sli4_dmaaddr page_phys_addr[0]; + struct sli4_dmaaddr page_phys_addr[]; }; struct sli4_rsp_rq_create_v2 { @@ -3168,7 +3168,7 @@ struct sli4_rqst_cmn_read_object { __le32 read_offset; u8 object_name[104]; __le32 host_buffer_descriptor_count; - struct sli4_bde host_buffer_descriptor[0]; + struct sli4_bde host_buffer_descriptor[]; }; #define RSP_COM_READ_OBJ_EOF 0x80000000 @@ -3191,7 +3191,7 @@ struct sli4_rqst_cmn_write_object { __le32 write_offset; u8 object_name[104]; __le32 host_buffer_descriptor_count; - struct sli4_bde host_buffer_descriptor[0]; + struct sli4_bde host_buffer_descriptor[]; }; #define RSP_CHANGE_STATUS 0xff @@ -3217,7 +3217,7 @@ struct sli4_rqst_cmn_read_object_list { __le32 read_offset; u8 object_name[104]; __le32 host_buffer_descriptor_count; - struct sli4_bde host_buffer_descriptor[0]; + struct sli4_bde host_buffer_descriptor[]; }; enum sli4_rqst_set_dump_flags { @@ -3342,7 +3342,7 @@ struct sli4_rspource_descriptor_v1 { u8 descriptor_type; u8 descriptor_length; __le16 rsvd16; - __le32 type_specific[0]; + __le32 type_specific[]; }; enum sli4_pcie_desc_flags { @@ -3474,7 +3474,7 @@ 
struct sli4_rqst_post_hdr_templates { struct sli4_rqst_hdr hdr; __le16 rpi_offset; __le16 page_count; - struct sli4_dmaaddr page_descriptor[0]; + struct sli4_dmaaddr page_descriptor[]; }; #define SLI4_HDR_TEMPLATE_SIZE 64 diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index fc4eaf6d1e47..fb7d8775c9f7 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -878,7 +878,7 @@ struct mpi3mr_fwevt { bool process_evt; u32 evt_ctx; struct kref ref_count; - char event_data[0] __aligned(4); + char event_data[] __aligned(4); }; diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index 0f8a4c7e52a2..6d2b0a7436c1 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -157,7 +157,7 @@ struct qla84_msg_mgmt { uint16_t rsrvd; struct qla84_mgmt_param mgmtp;/* parameters for cmd */ uint32_t len; /* bytes in payload following this struct */ - uint8_t payload[0]; /* payload for cmd */ + uint8_t payload[]; /* payload for cmd */ }; struct qla_bsg_a84_mgmt { @@ -216,7 +216,7 @@ struct qla_image_version { struct qla_image_version_list { uint32_t count; - struct qla_image_version version[0]; + struct qla_image_version version[]; } __packed; struct qla_status_reg { diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 9ebf4a234d9a..b6434c72dee3 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -5410,7 +5410,7 @@ struct ql_vnd_stat_entry { struct ql_vnd_stats { u64 entry_count; /* Num of entries */ u64 rservd; - struct ql_vnd_stat_entry entry[0]; /* Place holder of entries */ + struct ql_vnd_stat_entry entry[]; /* Place holder of entries */ } __packed; struct ql_vnd_host_stats_resp { diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h index 53026d82ebff..5a26c77157da 100644 --- a/drivers/scsi/qla2xxx/qla_edif_bsg.h +++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h @@ -121,7 +121,7 @@ struct app_pinfo { struct app_pinfo_reply { uint8_t port_count; uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; - struct app_pinfo ports[0]; + struct app_pinfo ports[]; } __packed; struct app_sinfo_req { @@ -140,7 +140,7 @@ struct app_sinfo { struct app_stats_reply { uint8_t elem_count; - struct app_sinfo elem[0]; + struct app_sinfo elem[]; } __packed; struct qla_sa_update_frame { diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 073d06e88c58..0bb1d562f0bf 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1706,7 +1706,7 @@ struct qla_flt_header { __le16 length; __le16 checksum; __le16 unused; - struct qla_flt_region region[0]; + struct qla_flt_region region[]; }; #define FLT_REGION_SIZE 16 diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 4e1764df0a73..860ec61b51b9 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -1028,7 +1028,7 @@ struct crash_record { uint8_t out_RISC_reg_dump[256]; /* 80 -17F */ uint8_t in_RISC_reg_dump[256]; /*180 -27F */ - uint8_t in_out_RISC_stack_dump[0]; /*280 - ??? */ + uint8_t in_out_RISC_stack_dump[]; /*280 - ??? 
*/ }; struct conn_event_log_entry { diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h index cf0945ae11c1..f8991a0493d0 100644 --- a/drivers/staging/r8188eu/include/rtw_cmd.h +++ b/drivers/staging/r8188eu/include/rtw_cmd.h @@ -73,7 +73,7 @@ struct c2h_evt_hdr { u8 id:4; u8 plen:4; u8 seq; - u8 payload[0]; + u8 payload[]; }; #define c2h_evt_exist(c2h_evt) ((c2h_evt)->id || (c2h_evt)->plen) @@ -662,25 +662,25 @@ struct getcurtxpwrlevel_rspi { struct setprobereqextraie_parm { unsigned char e_id; unsigned char ie_len; - unsigned char ie[0]; + unsigned char ie[]; }; struct setassocreqextraie_parm { unsigned char e_id; unsigned char ie_len; - unsigned char ie[0]; + unsigned char ie[]; }; struct setproberspextraie_parm { unsigned char e_id; unsigned char ie_len; - unsigned char ie[0]; + unsigned char ie[]; }; struct setassocrspextraie_parm { unsigned char e_id; unsigned char ie_len; - unsigned char ie[0]; + unsigned char ie[]; }; struct addBaReq_parm { diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h index ddd69c4ae208..95e9ea5b2d98 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.h +++ b/drivers/staging/rtl8712/rtl871x_cmd.h @@ -657,25 +657,25 @@ struct setra_parm { struct setprobereqextraie_parm { unsigned char e_id; unsigned char ie_len; - unsigned char ie[0]; + unsigned char ie[]; }; struct setassocreqextraie_parm { unsigned char e_id; unsigned char ie_len; - unsigned char ie[0]; + unsigned char ie[]; }; struct setproberspextraie_parm { unsigned char e_id; unsigned char ie_len; - unsigned char ie[0]; + unsigned char ie[]; }; struct setassocrspextraie_parm { unsigned char e_id; unsigned char ie_len; - unsigned char ie[0]; + unsigned char ie[]; }; struct addBaReq_parm { diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h index c11d7e2d2347..1e627dc0044d 100644 --- a/drivers/staging/rtl8723bs/include/ieee80211.h +++ b/drivers/staging/rtl8723bs/include/ieee80211.h @@ -204,7 +204,7 @@ struct ieee_param { struct ieee_param_ex { u32 cmd; u8 sta_addr[ETH_ALEN]; - u8 data[0]; + u8 data[]; }; struct sta_data { diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h index 28d2d2732374..1bf030cbbbbe 100644 --- a/drivers/staging/rtl8723bs/include/rtw_cmd.h +++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h @@ -94,7 +94,7 @@ struct c2h_evt_hdr { u8 id:4; u8 plen:4; u8 seq; - u8 payload[0]; + u8 payload[]; }; struct c2h_evt_hdr_88xx { diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h index 81db7fb76d6d..c93f2f3e87bb 100644 --- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h +++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h @@ -45,7 +45,7 @@ struct vchiq_header { /* Size of message data. 
*/ unsigned int size; - char data[0]; /* message */ + char data[]; /* message */ }; struct vchiq_element { diff --git a/drivers/visorbus/vbuschannel.h b/drivers/visorbus/vbuschannel.h index 4aaf6564eb9f..98711fb6d66e 100644 --- a/drivers/visorbus/vbuschannel.h +++ b/drivers/visorbus/vbuschannel.h @@ -89,7 +89,7 @@ struct visor_vbus_channel { struct visor_vbus_headerinfo hdr_info; struct visor_vbus_deviceinfo chp_info; struct visor_vbus_deviceinfo bus_info; - struct visor_vbus_deviceinfo dev_info[0]; + struct visor_vbus_deviceinfo dev_info[]; } __packed; #endif diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h index 298458404252..55758b9ec877 100644 --- a/fs/cifs/ntlmssp.h +++ b/fs/cifs/ntlmssp.h @@ -107,7 +107,7 @@ struct negotiate_message { SECURITY_BUFFER WorkstationName; /* RFC 1001 and ASCII */ struct ntlmssp_version Version; /* SECURITY_BUFFER */ - char DomainString[0]; + char DomainString[]; /* followed by WorkstationString */ } __packed; diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h index 083ad1cb705a..07e8b734c4fd 100644 --- a/fs/ext4/fast_commit.h +++ b/fs/ext4/fast_commit.h @@ -55,13 +55,13 @@ struct ext4_fc_del_range { struct ext4_fc_dentry_info { __le32 fc_parent_ino; __le32 fc_ino; - __u8 fc_dname[0]; + __u8 fc_dname[]; }; /* Value structure for EXT4_FC_TAG_INODE and EXT4_FC_TAG_INODE_PARTIAL. */ struct ext4_fc_inode { __le32 fc_ino; - __u8 fc_raw_inode[0]; + __u8 fc_raw_inode[]; }; /* Value structure for tag EXT4_FC_TAG_TAIL. */ diff --git a/fs/ksmbd/ksmbd_netlink.h b/fs/ksmbd/ksmbd_netlink.h index 71bfb7de4472..ebe6ca08467a 100644 --- a/fs/ksmbd/ksmbd_netlink.h +++ b/fs/ksmbd/ksmbd_netlink.h @@ -241,7 +241,7 @@ struct ksmbd_rpc_command { struct ksmbd_spnego_authen_request { __u32 handle; __u16 spnego_blob_len; /* the length of spnego_blob */ - __u8 spnego_blob[0]; /* + __u8 spnego_blob[]; /* * the GSS token from SecurityBuffer of * SMB2 SESSION SETUP request */ diff --git a/fs/ksmbd/ntlmssp.h b/fs/ksmbd/ntlmssp.h index adaf4c0cbe8f..f13153c18b4e 100644 --- a/fs/ksmbd/ntlmssp.h +++ b/fs/ksmbd/ntlmssp.h @@ -95,7 +95,7 @@ struct security_buffer { struct target_info { __le16 Type; __le16 Length; - __u8 Content[0]; + __u8 Content[]; } __packed; struct negotiate_message { @@ -108,7 +108,7 @@ struct negotiate_message { * struct security_buffer for version info not present since we * do not set the version is present flag */ - char DomainString[0]; + char DomainString[]; /* followed by WorkstationString */ } __packed; @@ -140,7 +140,7 @@ struct authenticate_message { * struct security_buffer for version info not present since we * do not set the version is present flag */ - char UserString[0]; + char UserString[]; } __packed; struct ntlmv2_resp { diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h index 725b800c29c8..d49468426576 100644 --- a/fs/ksmbd/smb2pdu.h +++ b/fs/ksmbd/smb2pdu.h @@ -759,7 +759,7 @@ struct smb2_file_rename_info { /* encoding of request for level 10 */ __u8 Reserved[7]; __u64 RootDirectory; /* MBZ for network operations (why says spec?) */ __le32 FileNameLength; - char FileName[0]; /* New name to be assigned */ + char FileName[]; /* New name to be assigned */ } __packed; /* level 10 Set */ struct smb2_file_link_info { /* encoding of request for level 11 */ @@ -768,7 +768,7 @@ struct smb2_file_link_info { /* encoding of request for level 11 */ __u8 Reserved[7]; __u64 RootDirectory; /* MBZ for network operations (why says spec?) 
*/ __le32 FileNameLength; - char FileName[0]; /* Name to be assigned to new link */ + char FileName[]; /* Name to be assigned to new link */ } __packed; /* level 11 Set */ /* @@ -810,7 +810,7 @@ struct smb2_file_basic_info { /* data block encoding of response to level 18 */ struct smb2_file_alt_name_info { __le32 FileNameLength; - char FileName[0]; + char FileName[]; } __packed; struct smb2_file_stream_info { @@ -818,7 +818,7 @@ struct smb2_file_stream_info { __le32 StreamNameLength; __le64 StreamSize; __le64 StreamAllocationSize; - char StreamName[0]; + char StreamName[]; } __packed; struct smb2_file_eof_info { /* encoding of request for level 10 */ diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c index 3c1ec1ac0b27..9976d39c6ed8 100644 --- a/fs/ksmbd/transport_rdma.c +++ b/fs/ksmbd/transport_rdma.c @@ -211,7 +211,7 @@ struct smb_direct_rdma_rw_msg { struct completion *completion; struct rdma_rw_ctx rw_ctx; struct sg_table sgt; - struct scatterlist sg_list[0]; + struct scatterlist sg_list[]; }; static inline int get_buf_page_count(void *buf, int size) diff --git a/fs/ksmbd/xattr.h b/fs/ksmbd/xattr.h index 8857c01093d9..16499ca5c82d 100644 --- a/fs/ksmbd/xattr.h +++ b/fs/ksmbd/xattr.h @@ -76,7 +76,7 @@ struct xattr_acl_entry { struct xattr_smb_acl { int count; int next; - struct xattr_acl_entry entries[0]; + struct xattr_acl_entry entries[]; }; /* 64bytes hash in xattr_ntacl is computed with sha256 */ diff --git a/fs/xfs/scrub/attr.h b/fs/xfs/scrub/attr.h index 1719e1c4da59..3590e10e3e62 100644 --- a/fs/xfs/scrub/attr.h +++ b/fs/xfs/scrub/attr.h @@ -24,7 +24,7 @@ struct xchk_xattr_buf { * space bitmap follows immediately after; and we have a third buffer * for storing intermediate bitmap results. */ - uint8_t buf[0]; + uint8_t buf[]; }; /* A place to store attribute values. 
*/ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 2c68a545ffa7..fd7feb5c7894 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -180,7 +180,7 @@ struct mmu_table_batch { struct rcu_head rcu; #endif unsigned int nr; - void *tables[0]; + void *tables[]; }; #define MAX_TABLE_BATCH \ @@ -227,7 +227,7 @@ struct mmu_gather_batch { struct mmu_gather_batch *next; unsigned int nr; unsigned int max; - struct page *pages[0]; + struct page *pages[]; }; #define MAX_GATHER_BATCH \ diff --git a/include/linux/greybus/greybus_manifest.h b/include/linux/greybus/greybus_manifest.h index 6e62fe478712..bef9eb2093e9 100644 --- a/include/linux/greybus/greybus_manifest.h +++ b/include/linux/greybus/greybus_manifest.h @@ -100,7 +100,7 @@ enum { struct greybus_descriptor_string { __u8 length; __u8 id; - __u8 string[0]; + __u8 string[]; } __packed; /* @@ -175,7 +175,7 @@ struct greybus_manifest_header { struct greybus_manifest { struct greybus_manifest_header header; - struct greybus_descriptor descriptors[0]; + struct greybus_descriptor descriptors[]; } __packed; #endif /* __GREYBUS_MANIFEST_H */ diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h index d3faf0c1a569..718e2857054e 100644 --- a/include/linux/greybus/hd.h +++ b/include/linux/greybus/hd.h @@ -58,7 +58,7 @@ struct gb_host_device { struct gb_svc *svc; /* Private data for the host driver */ - unsigned long hd_priv[0] __aligned(sizeof(s64)); + unsigned long hd_priv[] __aligned(sizeof(s64)); }; #define to_gb_host_device(d) container_of(d, struct gb_host_device, dev) diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h index 47b839af145d..3efe2133acfd 100644 --- a/include/linux/greybus/module.h +++ b/include/linux/greybus/module.h @@ -23,7 +23,7 @@ struct gb_module { bool disconnected; - struct gb_interface *interfaces[0]; + struct gb_interface *interfaces[]; }; #define to_gb_module(d) container_of(d, struct gb_module, dev) diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h index 73b0982cc519..ad59a4ae60d1 100644 --- a/include/linux/i3c/ccc.h +++ b/include/linux/i3c/ccc.h @@ -132,7 +132,7 @@ struct i3c_ccc_dev_desc { struct i3c_ccc_defslvs { u8 count; struct i3c_ccc_dev_desc master; - struct i3c_ccc_dev_desc slaves[0]; + struct i3c_ccc_dev_desc slaves[]; } __packed; /** @@ -240,7 +240,7 @@ struct i3c_ccc_bridged_slave_desc { */ struct i3c_ccc_setbrgtgt { u8 count; - struct i3c_ccc_bridged_slave_desc bslaves[0]; + struct i3c_ccc_bridged_slave_desc bslaves[]; } __packed; /** @@ -318,7 +318,7 @@ enum i3c_ccc_setxtime_subcmd { */ struct i3c_ccc_setxtime { u8 subcmd; - u8 data[0]; + u8 data[]; } __packed; #define I3C_CCC_GETXTIME_SYNC_MODE BIT(0) diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h index 2b5676ff35be..f922a192fe58 100644 --- a/include/linux/platform_data/brcmfmac.h +++ b/include/linux/platform_data/brcmfmac.h @@ -178,7 +178,7 @@ struct brcmfmac_platform_data { void (*power_off)(void); char *fw_alternative_path; int device_count; - struct brcmfmac_pd_device devices[0]; + struct brcmfmac_pd_device devices[]; }; diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 271bd87bff0a..728735aed980 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5644,7 +5644,7 @@ struct ec_response_typec_discovery { uint8_t svid_count; /* Number of SVIDs partner sent */ uint16_t reserved; uint32_t 
discovery_vdo[6]; /* Max VDOs allowed after VDM header is 6 */ - struct svid_mode_info svids[0]; + struct svid_mode_info svids[]; } __ec_align1; /* USB Type-C commands for AP-controlled device policy. */ diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 107b25deae68..9607ec289fd0 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -696,7 +696,7 @@ struct mgmt_cp_set_blocked_keys { #define MGMT_READ_CONTROLLER_CAP_SIZE 0 struct mgmt_rp_read_controller_cap { __le16 cap_len; - __u8 cap[0]; + __u8 cap[]; } __packed; #define MGMT_OP_READ_EXP_FEATURES_INFO 0x0049 diff --git a/include/net/ioam6.h b/include/net/ioam6.h index 3f45ba37a2c6..781d2d8b2f29 100644 --- a/include/net/ioam6.h +++ b/include/net/ioam6.h @@ -35,7 +35,7 @@ struct ioam6_schema { int len; __be32 hdr; - u8 data[0]; + u8 data[]; }; struct ioam6_pernet_data { diff --git a/include/sound/sof/channel_map.h b/include/sound/sof/channel_map.h index fd3a30fcf756..d363f0ca6979 100644 --- a/include/sound/sof/channel_map.h +++ b/include/sound/sof/channel_map.h @@ -39,7 +39,7 @@ struct sof_ipc_channel_map { uint32_t ext_id; uint32_t ch_mask; uint32_t reserved; - int32_t ch_coeffs[0]; + int32_t ch_coeffs[]; } __packed; /** @@ -55,7 +55,7 @@ struct sof_ipc_stream_map { struct sof_ipc_cmd_hdr hdr; uint32_t num_ch_map; uint32_t reserved[3]; - struct sof_ipc_channel_map ch_map[0]; + struct sof_ipc_channel_map ch_map[]; } __packed; #endif /* __IPC_CHANNEL_MAP_H__ */ diff --git a/scripts/dtc/libfdt/fdt.h b/scripts/dtc/libfdt/fdt.h index f2e68807f277..0c91aa7f67b5 100644 --- a/scripts/dtc/libfdt/fdt.h +++ b/scripts/dtc/libfdt/fdt.h @@ -35,14 +35,14 @@ struct fdt_reserve_entry { struct fdt_node_header { fdt32_t tag; - char name[0]; + char name[]; }; struct fdt_property { fdt32_t tag; fdt32_t len; fdt32_t nameoff; - char data[0]; + char data[]; }; #endif /* !__ASSEMBLY */ diff --git a/sound/soc/intel/atom/sst-mfld-dsp.h b/sound/soc/intel/atom/sst-mfld-dsp.h index 8d9e29b16e57..c8f0816edb53 100644 --- a/sound/soc/intel/atom/sst-mfld-dsp.h +++ b/sound/soc/intel/atom/sst-mfld-dsp.h @@ -427,7 +427,7 @@ struct snd_sst_drop_response { struct snd_sst_async_msg { u32 msg_id; /* Async msg id */ - u32 payload[0]; + u32 payload[]; }; struct snd_sst_async_err_msg { @@ -514,7 +514,7 @@ struct snd_sst_bytes_v2 { u8 pipe_id; u8 rsvd; u16 len; - char bytes[0]; + char bytes[]; }; #define MAX_VTSV_FILES 2 diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h index 22963634fbea..a5bccf2fcd88 100644 --- a/sound/soc/intel/skylake/skl-topology.h +++ b/sound/soc/intel/skylake/skl-topology.h @@ -164,7 +164,7 @@ struct skl_base_cfg_ext { u8 reserved[8]; u32 priv_param_length; /* Input pin formats followed by output ones. 
*/ - struct skl_pin_format pins_fmt[0]; + struct skl_pin_format pins_fmt[]; } __packed; struct skl_algo_cfg { diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h index 75ee385fb078..e7758707cadd 100644 --- a/tools/lib/perf/include/perf/event.h +++ b/tools/lib/perf/include/perf/event.h @@ -240,7 +240,7 @@ struct id_index_entry { struct perf_record_id_index { struct perf_event_header header; __u64 nr; - struct id_index_entry entries[0]; + struct id_index_entry entries[]; }; struct perf_record_auxtrace_info { -- cgit v1.2.3-71-gd317 From 7a5428dcb7902700b830e912feee4e845df7c019 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 17 Feb 2022 08:52:31 +0100 Subject: block: fix surprise removal for drivers calling blk_set_queue_dying MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Various block drivers call blk_set_queue_dying to mark a disk as dead due to surprise removal events, but since commit 8e141f9eb803 that doesn't work given that the GD_DEAD flag needs to be set to stop I/O. Replace the driver calls to blk_set_queue_dying with a new (and properly documented) blk_mark_disk_dead API, and fold blk_set_queue_dying into the only remaining caller. Fixes: 8e141f9eb803 ("block: drain file system I/O on del_gendisk") Reported-by: Markus Blöchl Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Link: https://lore.kernel.org/r/20220217075231.1140-1-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-core.c | 10 ++-------- block/genhd.c | 14 ++++++++++++++ drivers/block/mtip32xx/mtip32xx.c | 2 +- drivers/block/rbd.c | 2 +- drivers/block/xen-blkfront.c | 2 +- drivers/md/dm.c | 2 +- drivers/nvme/host/core.c | 2 +- drivers/nvme/host/multipath.c | 2 +- include/linux/blkdev.h | 3 ++- 9 files changed, 24 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index d93e3bb9a769..1039515c99d6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -284,13 +284,6 @@ void blk_queue_start_drain(struct request_queue *q) wake_up_all(&q->mq_freeze_wq); } -void blk_set_queue_dying(struct request_queue *q) -{ - blk_queue_flag_set(QUEUE_FLAG_DYING, q); - blk_queue_start_drain(q); -} -EXPORT_SYMBOL_GPL(blk_set_queue_dying); - /** * blk_cleanup_queue - shutdown a request queue * @q: request queue to shutdown @@ -308,7 +301,8 @@ void blk_cleanup_queue(struct request_queue *q) WARN_ON_ONCE(blk_queue_registered(q)); /* mark @q DYING, no new request or merges will be allowed afterwards */ - blk_set_queue_dying(q); + blk_queue_flag_set(QUEUE_FLAG_DYING, q); + blk_queue_start_drain(q); blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); diff --git a/block/genhd.c b/block/genhd.c index 626c8406f21a..9eca1f7d35c9 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -548,6 +548,20 @@ out_free_ext_minor: } EXPORT_SYMBOL(device_add_disk); +/** + * blk_mark_disk_dead - mark a disk as dead + * @disk: disk to mark as dead + * + * Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O + * to this disk. 
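+ *
+ * Unlike the old blk_set_queue_dying(), this sets GD_DEAD on the
+ * gendisk rather than QUEUE_FLAG_DYING on the queue; since commit
+ * 8e141f9eb803 ("block: drain file system I/O on del_gendisk"),
+ * GD_DEAD is the flag that actually stops new I/O.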
+ */ +void blk_mark_disk_dead(struct gendisk *disk) +{ + set_bit(GD_DEAD, &disk->state); + blk_queue_start_drain(disk->queue); +} +EXPORT_SYMBOL_GPL(blk_mark_disk_dead); + /** * del_gendisk - remove the gendisk * @disk: the struct gendisk to remove diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index e6005c232328..2b588b62cbbb 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev) "Completion workers still active!\n"); } - blk_set_queue_dying(dd->queue); + blk_mark_disk_dead(dd->disk); set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); /* Clean up the block layer. */ diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4203cdab8abf..b844432bad20 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus, * IO to complete/fail. */ blk_mq_freeze_queue(rbd_dev->disk->queue); - blk_set_queue_dying(rbd_dev->disk->queue); + blk_mark_disk_dead(rbd_dev->disk); } del_gendisk(rbd_dev->disk); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ccd0dd0c6b83..ca71a0585333 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2126,7 +2126,7 @@ static void blkfront_closing(struct blkfront_info *info) /* No more blkif_request(). */ blk_mq_stop_hw_queues(info->rq); - blk_set_queue_dying(info->rq); + blk_mark_disk_dead(info->gd); set_capacity(info->gd, 0); for_each_rinfo(info, rinfo, i) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index dcbd6d201619..997ace47bbd5 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2077,7 +2077,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); - blk_set_queue_dying(md->queue); + blk_mark_disk_dead(md->disk); /* * Take suspend_lock so that presuspend and postsuspend methods diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 79005ea1a33e..469f23186159 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4574,7 +4574,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns) if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) return; - blk_set_queue_dying(ns->queue); + blk_mark_disk_dead(ns->disk); nvme_start_ns_queue(ns); set_capacity_and_notify(ns->disk, 0); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index f8bf6606eb2f..ff775235534c 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -848,7 +848,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) { if (!head->disk) return; - blk_set_queue_dying(head->disk->queue); + blk_mark_disk_dead(head->disk); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f35aea98bc35..16b47035e4b0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -748,7 +748,8 @@ extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, bool __must_check blk_get_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *); -extern void blk_set_queue_dying(struct request_queue *); + +void blk_mark_disk_dead(struct gendisk *disk); #ifdef CONFIG_BLOCK /* -- cgit v1.2.3-71-gd317 From 2e7dfb0e9cacad0f1adbc4b97f0b96ba35027f24 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Sun, 13 Feb 2022 23:01:16 -0600 Subject: usb: 
typec: Factor out non-PD fwnode properties Basic programmable non-PD Type-C port controllers do not need the full TCPM library, but they share the same devicetree binding and the same typec_capability structure. Factor out a helper for parsing those properties which map to fields in struct typec_capability, so the code can be shared between TCPM and basic non-TCPM drivers. Reviewed-by: Heikki Krogerus Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20220214050118.61015-4-samuel@sholland.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/class.c | 43 +++++++++++++++++++++++++++++++++++++++++++ drivers/usb/typec/tcpm/tcpm.c | 24 +----------------------- include/linux/usb/typec.h | 3 +++ 3 files changed, 47 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index 45a6f0c807cb..ee0e520707dd 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1894,6 +1894,49 @@ void *typec_get_drvdata(struct typec_port *port) } EXPORT_SYMBOL_GPL(typec_get_drvdata); +int typec_get_fw_cap(struct typec_capability *cap, + struct fwnode_handle *fwnode) +{ + const char *cap_str; + int ret; + + cap->fwnode = fwnode; + + ret = fwnode_property_read_string(fwnode, "power-role", &cap_str); + if (ret < 0) + return ret; + + ret = typec_find_port_power_role(cap_str); + if (ret < 0) + return ret; + cap->type = ret; + + /* USB data support is optional */ + ret = fwnode_property_read_string(fwnode, "data-role", &cap_str); + if (ret == 0) { + ret = typec_find_port_data_role(cap_str); + if (ret < 0) + return ret; + cap->data = ret; + } + + /* Get the preferred power role for a DRP */ + if (cap->type == TYPEC_PORT_DRP) { + cap->prefer_role = TYPEC_NO_PREFERRED_ROLE; + + ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str); + if (ret == 0) { + ret = typec_find_power_role(cap_str); + if (ret < 0) + return ret; + cap->prefer_role = ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(typec_get_fw_cap); + /** * typec_port_register_altmode - Register USB Type-C Port Alternate Mode * @port: USB Type-C Port that supports the alternate mode diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 5fce795b69c7..3bc2f4ebd1fe 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -5928,7 +5928,6 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode) { const char *opmode_str; - const char *cap_str; int ret; u32 mw, frs_current; @@ -5944,23 +5943,10 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, */ fw_devlink_purge_absent_suppliers(fwnode); - /* USB data support is optional */ - ret = fwnode_property_read_string(fwnode, "data-role", &cap_str); - if (ret == 0) { - ret = typec_find_port_data_role(cap_str); - if (ret < 0) - return ret; - port->typec_caps.data = ret; - } - - ret = fwnode_property_read_string(fwnode, "power-role", &cap_str); + ret = typec_get_fw_cap(&port->typec_caps, fwnode); if (ret < 0) return ret; - ret = typec_find_port_power_role(cap_str); - if (ret < 0) - return ret; - port->typec_caps.type = ret; port->port_type = port->typec_caps.type; port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable"); @@ -5997,14 +5983,6 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, if (port->port_type == TYPEC_PORT_SRC) return 0; - /* Get the preferred power role for DRP */ - ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str); - if (ret < 0) - return ret; - - port->typec_caps.prefer_role 
= typec_find_power_role(cap_str); - if (port->typec_caps.prefer_role < 0) - return -EINVAL; sink: port->self_powered = fwnode_property_read_bool(fwnode, "self-powered"); diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 7ba45a97eeae..fdf737d48b3b 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -295,6 +295,9 @@ int typec_set_mode(struct typec_port *port, int mode); void *typec_get_drvdata(struct typec_port *port); +int typec_get_fw_cap(struct typec_capability *cap, + struct fwnode_handle *fwnode); + int typec_find_pwr_opmode(const char *name); int typec_find_orientation(const char *name); int typec_find_port_power_role(const char *name); -- cgit v1.2.3-71-gd317 From ebcbc6ea7d8a604ad8504dae70a6ac1b1e64a0b7 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Feb 2022 18:20:24 -0800 Subject: mm/munlock: delete page_mlock() and all its works We have recommended some applications to mlock their userspace, but that turns out to be counter-productive: when many processes mlock the same file, contention on rmap's i_mmap_rwsem can become intolerable at exit: it is needed for write, to remove any vma mapping that file from rmap's tree; but hogged for read by those with mlocks calling page_mlock() (formerly known as try_to_munlock()) on *each* page mapped from the file (the purpose being to find out whether another process has the page mlocked, so therefore it should not be unmlocked yet). Several optimizations have been made in the past: one is to skip page_mlock() when mapcount tells that nothing else has this page mapped; but that doesn't help at all when others do have it mapped. This time around, I initially intended to add a preliminary search of the rmap tree for overlapping VM_LOCKED ranges; but that gets messy with locking order, when in doubt whether a page is actually present; and risks adding even more contention on the i_mmap_rwsem. A solution would be much easier, if only there were space in struct page for an mlock_count... but actually, most of the time, there is space for it - an mlocked page spends most of its life on an unevictable LRU, but since 3.18 removed the scan_unevictable_pages sysctl, that "LRU" has been redundant. Let's try to reuse its page->lru. But leave that until a later patch: in this patch, clear the ground by removing page_mlock(), and all the infrastructure that has gathered around it - which mostly hinders understanding, and will make reviewing new additions harder. Don't mind those old comments about THPs, they date from before 4.5's refcounting rework: splitting is not a risk here. Just keep a minimal version of munlock_vma_page(), as reminder of what it should attend to (in particular, the odd way PGSTRANDED is counted out of PGMUNLOCKED), and likewise a stub for munlock_vma_pages_range(). Move unchanged __mlock_posix_error_return() out of the way, down to above its caller: this series then makes no further change after mlock_fixup(). After this and each following commit, the kernel builds, boots and runs; but with deficiencies which may show up in testing of mlock and munlock. The system calls succeed or fail as before, and mlock remains effective in preventing page reclaim; but meminfo's Unevictable and Mlocked amounts may be shown too low after mlock, grow, then stay too high after munlock: with previously mlocked pages remaining unevictable for too long, until finally unmapped and freed and counts corrected. Normal service will be resumed in "mm/munlock: mlock_pte_range() when mlocking or munlocking". 
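To make the mlock_count idea above concrete, here is a minimal compilable sketch (illustrative stand-in types only, not the real struct page; the actual layout is added by a later patch in this series):

struct list_head {
	struct list_head *next, *prev;
};

/* Stand-in for the relevant corner of struct page */
struct page_sketch {
	union {
		struct list_head lru;		/* while on an LRU list */
		struct {			/* while mlocked, off-LRU */
			void *__filler;		/* overlays lru.next */
			unsigned int mlock_count; /* overlays lru.prev */
		};
	};
};

The only point is that an mlocked page sits on no LRU list, so the two list pointers are free to carry a count of how many VM_LOCKED vmas map the page.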
Signed-off-by: Hugh Dickins Acked-by: Vlastimil Babka Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/rmap.h | 6 - mm/internal.h | 2 +- mm/mlock.c | 375 ++++----------------------------------------------- mm/rmap.c | 80 ----------- 4 files changed, 25 insertions(+), 438 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index e704b1a4c06c..dc48aa8c2c94 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -237,12 +237,6 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); */ int folio_mkclean(struct folio *); -/* - * called in munlock()/munmap() path to check for other vmas holding - * the page mlocked. - */ -void page_mlock(struct page *page); - void remove_migration_ptes(struct page *old, struct page *new, bool locked); /* diff --git a/mm/internal.h b/mm/internal.h index d80300392a19..e48c486d5ddf 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -409,7 +409,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma) * must be called with vma's mmap_lock held for read or write, and page locked. */ extern void mlock_vma_page(struct page *page); -extern unsigned int munlock_vma_page(struct page *page); +extern void munlock_vma_page(struct page *page); extern int mlock_future_check(struct mm_struct *mm, unsigned long flags, unsigned long len); diff --git a/mm/mlock.c b/mm/mlock.c index 8f584eddd305..aec4ce7919da 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -46,12 +46,6 @@ EXPORT_SYMBOL(can_do_mlock); * be placed on the LRU "unevictable" list, rather than the [in]active lists. * The unevictable list is an LRU sibling list to the [in]active lists. * PageUnevictable is set to indicate the unevictable state. - * - * When lazy mlocking via vmscan, it is important to ensure that the - * vma's VM_LOCKED status is not concurrently being modified, otherwise we - * may have mlocked a page that is being munlocked. So lazy mlock must take - * the mmap_lock for read, and verify that the vma really is locked - * (see mm/rmap.c). */ /* @@ -106,299 +100,28 @@ void mlock_vma_page(struct page *page) } } -/* - * Finish munlock after successful page isolation - * - * Page must be locked. This is a wrapper for page_mlock() - * and putback_lru_page() with munlock accounting. - */ -static void __munlock_isolated_page(struct page *page) -{ - /* - * Optimization: if the page was mapped just once, that's our mapping - * and we don't need to check all the other vmas. - */ - if (page_mapcount(page) > 1) - page_mlock(page); - - /* Did try_to_unlock() succeed or punt? */ - if (!PageMlocked(page)) - count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page)); - - putback_lru_page(page); -} - -/* - * Accounting for page isolation fail during munlock - * - * Performs accounting when page isolation fails in munlock. There is nothing - * else to do because it means some other task has already removed the page - * from the LRU. putback_lru_page() will take care of removing the page from - * the unevictable list, if necessary. vmscan [page_referenced()] will move - * the page back to the unevictable list if some other vma has it mlocked. 
- */ -static void __munlock_isolation_failed(struct page *page) -{ - int nr_pages = thp_nr_pages(page); - - if (PageUnevictable(page)) - __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages); - else - __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages); -} - /** * munlock_vma_page - munlock a vma page * @page: page to be unlocked, either a normal page or THP page head - * - * returns the size of the page as a page mask (0 for normal page, - * HPAGE_PMD_NR - 1 for THP head page) - * - * called from munlock()/munmap() path with page supposedly on the LRU. - * When we munlock a page, because the vma where we found the page is being - * munlock()ed or munmap()ed, we want to check whether other vmas hold the - * page locked so that we can leave it on the unevictable lru list and not - * bother vmscan with it. However, to walk the page's rmap list in - * page_mlock() we must isolate the page from the LRU. If some other - * task has removed the page from the LRU, we won't be able to do that. - * So we clear the PageMlocked as we might not get another chance. If we - * can't isolate the page, we leave it for putback_lru_page() and vmscan - * [page_referenced()/try_to_unmap()] to deal with. */ -unsigned int munlock_vma_page(struct page *page) +void munlock_vma_page(struct page *page) { - int nr_pages; - - /* For page_mlock() and to serialize with page migration */ + /* Serialize with page migration */ BUG_ON(!PageLocked(page)); - VM_BUG_ON_PAGE(PageTail(page), page); - - if (!TestClearPageMlocked(page)) { - /* Potentially, PTE-mapped THP: do not skip the rest PTEs */ - return 0; - } - - nr_pages = thp_nr_pages(page); - mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); - - if (!isolate_lru_page(page)) - __munlock_isolated_page(page); - else - __munlock_isolation_failed(page); - - return nr_pages - 1; -} - -/* - * convert get_user_pages() return value to posix mlock() error - */ -static int __mlock_posix_error_return(long retval) -{ - if (retval == -EFAULT) - retval = -ENOMEM; - else if (retval == -ENOMEM) - retval = -EAGAIN; - return retval; -} - -/* - * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec() - * - * The fast path is available only for evictable pages with single mapping. - * Then we can bypass the per-cpu pvec and get better performance. - * when mapcount > 1 we need page_mlock() which can fail. - * when !page_evictable(), we need the full redo logic of putback_lru_page to - * avoid leaving evictable page in unevictable list. - * - * In case of success, @page is added to @pvec and @pgrescued is incremented - * in case that the page was previously unevictable. @page is also unlocked. - */ -static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, - int *pgrescued) -{ - VM_BUG_ON_PAGE(PageLRU(page), page); - VM_BUG_ON_PAGE(!PageLocked(page), page); - - if (page_mapcount(page) <= 1 && page_evictable(page)) { - pagevec_add(pvec, page); - if (TestClearPageUnevictable(page)) - (*pgrescued)++; - unlock_page(page); - return true; - } - - return false; -} -/* - * Putback multiple evictable pages to the LRU - * - * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of - * the pages might have meanwhile become unevictable but that is OK. 
- */ -static void __putback_lru_fast(struct pagevec *pvec, int pgrescued) -{ - count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec)); - /* - *__pagevec_lru_add() calls release_pages() so we don't call - * put_page() explicitly - */ - __pagevec_lru_add(pvec); - count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); -} - -/* - * Munlock a batch of pages from the same zone - * - * The work is split to two main phases. First phase clears the Mlocked flag - * and attempts to isolate the pages, all under a single zone lru lock. - * The second phase finishes the munlock only for pages where isolation - * succeeded. - * - * Note that the pagevec may be modified during the process. - */ -static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) -{ - int i; - int nr = pagevec_count(pvec); - int delta_munlocked = -nr; - struct pagevec pvec_putback; - struct lruvec *lruvec = NULL; - int pgrescued = 0; - - pagevec_init(&pvec_putback); - - /* Phase 1: page isolation */ - for (i = 0; i < nr; i++) { - struct page *page = pvec->pages[i]; - struct folio *folio = page_folio(page); - - if (TestClearPageMlocked(page)) { - /* - * We already have pin from follow_page_mask() - * so we can spare the get_page() here. - */ - if (TestClearPageLRU(page)) { - lruvec = folio_lruvec_relock_irq(folio, lruvec); - del_page_from_lru_list(page, lruvec); - continue; - } else - __munlock_isolation_failed(page); - } else { - delta_munlocked++; - } + VM_BUG_ON_PAGE(PageTail(page), page); - /* - * We won't be munlocking this page in the next phase - * but we still need to release the follow_page_mask() - * pin. We cannot do it under lru_lock however. If it's - * the last pin, __page_cache_release() would deadlock. - */ - pagevec_add(&pvec_putback, pvec->pages[i]); - pvec->pages[i] = NULL; - } - if (lruvec) { - __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); - unlock_page_lruvec_irq(lruvec); - } else if (delta_munlocked) { - mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); - } + if (TestClearPageMlocked(page)) { + int nr_pages = thp_nr_pages(page); - /* Now we can release pins of pages that we are not munlocking */ - pagevec_release(&pvec_putback); - - /* Phase 2: page munlock */ - for (i = 0; i < nr; i++) { - struct page *page = pvec->pages[i]; - - if (page) { - lock_page(page); - if (!__putback_lru_fast_prepare(page, &pvec_putback, - &pgrescued)) { - /* - * Slow path. We don't want to lose the last - * pin before unlock_page() - */ - get_page(page); /* for putback_lru_page() */ - __munlock_isolated_page(page); - unlock_page(page); - put_page(page); /* from follow_page_mask() */ - } + mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); + if (!isolate_lru_page(page)) { + putback_lru_page(page); + count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages); + } else if (PageUnevictable(page)) { + count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages); } } - - /* - * Phase 3: page putback for pages that qualified for the fast path - * This will also call put_page() to return pin from follow_page_mask() - */ - if (pagevec_count(&pvec_putback)) - __putback_lru_fast(&pvec_putback, pgrescued); -} - -/* - * Fill up pagevec for __munlock_pagevec using pte walk - * - * The function expects that the struct page corresponding to @start address is - * a non-TPH page already pinned and in the @pvec, and that it belongs to @zone. - * - * The rest of @pvec is filled by subsequent pages within the same pmd and same - * zone, as long as the pte's are present and vm_normal_page() succeeds. These - * pages also get pinned. 
- * - * Returns the address of the next page that should be scanned. This equals - * @start + PAGE_SIZE when no page could be added by the pte walk. - */ -static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, - struct vm_area_struct *vma, struct zone *zone, - unsigned long start, unsigned long end) -{ - pte_t *pte; - spinlock_t *ptl; - - /* - * Initialize pte walk starting at the already pinned page where we - * are sure that there is a pte, as it was pinned under the same - * mmap_lock write op. - */ - pte = get_locked_pte(vma->vm_mm, start, &ptl); - /* Make sure we do not cross the page table boundary */ - end = pgd_addr_end(start, end); - end = p4d_addr_end(start, end); - end = pud_addr_end(start, end); - end = pmd_addr_end(start, end); - - /* The page next to the pinned page is the first we will try to get */ - start += PAGE_SIZE; - while (start < end) { - struct page *page = NULL; - pte++; - if (pte_present(*pte)) - page = vm_normal_page(vma, start, *pte); - /* - * Break if page could not be obtained or the page's node+zone does not - * match - */ - if (!page || page_zone(page) != zone) - break; - - /* - * Do not use pagevec for PTE-mapped THP, - * munlock_vma_pages_range() will handle them. - */ - if (PageTransCompound(page)) - break; - - get_page(page); - /* - * Increase the address that will be returned *before* the - * eventual break due to pvec becoming full by adding the page - */ - start += PAGE_SIZE; - if (pagevec_add(pvec, page) == 0) - break; - } - pte_unmap_unlock(pte, ptl); - return start; } /* @@ -413,75 +136,13 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, * * Returns with VM_LOCKED cleared. Callers must be prepared to * deal with this. - * - * We don't save and restore VM_LOCKED here because pages are - * still on lru. In unmap path, pages might be scanned by reclaim - * and re-mlocked by page_mlock/try_to_unmap before we unmap and - * free them. This will result in freeing mlocked pages. */ void munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { vma->vm_flags &= VM_LOCKED_CLEAR_MASK; - while (start < end) { - struct page *page; - unsigned int page_mask = 0; - unsigned long page_increm; - struct pagevec pvec; - struct zone *zone; - - pagevec_init(&pvec); - /* - * Although FOLL_DUMP is intended for get_dump_page(), - * it just so happens that its special treatment of the - * ZERO_PAGE (returning an error instead of doing get_page) - * suits munlock very well (and if somehow an abnormal page - * has sneaked into the range, we won't oops here: great). - */ - page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); - - if (page && !IS_ERR(page)) { - if (PageTransTail(page)) { - VM_BUG_ON_PAGE(PageMlocked(page), page); - put_page(page); /* follow_page_mask() */ - } else if (PageTransHuge(page)) { - lock_page(page); - /* - * Any THP page found by follow_page_mask() may - * have gotten split before reaching - * munlock_vma_page(), so we need to compute - * the page_mask here instead. - */ - page_mask = munlock_vma_page(page); - unlock_page(page); - put_page(page); /* follow_page_mask() */ - } else { - /* - * Non-huge pages are handled in batches via - * pagevec. The pin from follow_page_mask() - * prevents them from collapsing by THP. - */ - pagevec_add(&pvec, page); - zone = page_zone(page); - - /* - * Try to fill the rest of pagevec using fast - * pte walk. This will also update start to - * the next page to process. Then munlock the - * pagevec. 
- */ - start = __munlock_pagevec_fill(&pvec, vma, - zone, start, end); - __munlock_pagevec(&pvec, zone); - goto next; - } - } - page_increm = 1 + page_mask; - start += page_increm * PAGE_SIZE; -next: - cond_resched(); - } + /* Reimplementation to follow in later commit */ } /* @@ -645,6 +306,18 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm, return count >> PAGE_SHIFT; } +/* + * convert get_user_pages() return value to posix mlock() error + */ +static int __mlock_posix_error_return(long retval) +{ + if (retval == -EFAULT) + retval = -ENOMEM; + else if (retval == -ENOMEM) + retval = -EAGAIN; + return retval; +} + static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) { unsigned long locked; diff --git a/mm/rmap.c b/mm/rmap.c index 6a1e8c7f6213..7ce7f1946cff 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1996,76 +1996,6 @@ void try_to_migrate(struct page *page, enum ttu_flags flags) rmap_walk(page, &rwc); } -/* - * Walks the vma's mapping a page and mlocks the page if any locked vma's are - * found. Once one is found the page is locked and the scan can be terminated. - */ -static bool page_mlock_one(struct page *page, struct vm_area_struct *vma, - unsigned long address, void *unused) -{ - struct page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - .address = address, - }; - - /* An un-locked vma doesn't have any pages to lock, continue the scan */ - if (!(vma->vm_flags & VM_LOCKED)) - return true; - - while (page_vma_mapped_walk(&pvmw)) { - /* - * Need to recheck under the ptl to serialise with - * __munlock_pagevec_fill() after VM_LOCKED is cleared in - * munlock_vma_pages_range(). - */ - if (vma->vm_flags & VM_LOCKED) { - /* - * PTE-mapped THP are never marked as mlocked; but - * this function is never called on a DoubleMap THP, - * nor on an Anon THP (which may still be PTE-mapped - * after DoubleMap was cleared). - */ - mlock_vma_page(page); - /* - * No need to scan further once the page is marked - * as mlocked. - */ - page_vma_mapped_walk_done(&pvmw); - return false; - } - } - - return true; -} - -/** - * page_mlock - try to mlock a page - * @page: the page to be mlocked - * - * Called from munlock code. Checks all of the VMAs mapping the page and mlocks - * the page if any are found. The page will be returned with PG_mlocked cleared - * if it is not mapped by any locked vmas. - */ -void page_mlock(struct page *page) -{ - struct rmap_walk_control rwc = { - .rmap_one = page_mlock_one, - .done = page_not_mapped, - .anon_lock = page_lock_anon_vma_read, - - }; - - VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); - VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); - - /* Anon THP are only marked as mlocked when singly mapped */ - if (PageTransCompound(page) && PageAnon(page)) - return; - - rmap_walk(page, &rwc); -} - #ifdef CONFIG_DEVICE_PRIVATE struct make_exclusive_args { struct mm_struct *mm; @@ -2291,11 +2221,6 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, * * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the anon_vma struct it points to. - * - * When called from page_mlock(), the mmap_lock of the mm containing the vma - * where the page was found will be held for write. So, we won't recheck - * vm_flags for that VMA. That should be OK, because that vma shouldn't be - * LOCKED. 
*/ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, bool locked) @@ -2344,11 +2269,6 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, * * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the address_space struct it points to. - * - * When called from page_mlock(), the mmap_lock of the mm containing the vma - * where the page was found will be held for write. So, we won't recheck - * vm_flags for that VMA. That should be OK, because that vma shouldn't be - * LOCKED. */ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, bool locked) -- cgit v1.2.3-71-gd317 From b67bf49ce7aae72f63739abee6ac25f64bf20081 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Feb 2022 18:21:52 -0800 Subject: mm/munlock: delete FOLL_MLOCK and FOLL_POPULATE If counting page mlocks, we must not double-count: follow_page_pte() can tell if a page has already been Mlocked or not, but cannot tell if a pte has already been counted or not: that will have to be done when the pte is mapped in (which lru_cache_add_inactive_or_unevictable() already tracks for new anon pages, but there's no such tracking yet for others). Delete all the FOLL_MLOCK code - faulting in the missing pages will do all that is necessary, without special mlock_vma_page() calls from here. But then FOLL_POPULATE turns out to serve no purpose - it was there so that its absence would tell faultin_page() not to faultin page when setting up VM_LOCKONFAULT areas; but if there's no special work needed here for mlock, then there's no work at all here for VM_LOCKONFAULT. Have I got that right? I've not looked into the history, but see that FOLL_POPULATE goes back before VM_LOCKONFAULT: did it serve a different purpose before? Ah, yes, it was used to skip the old stack guard page. And is it intentional that COW is not broken on existing pages when setting up a VM_LOCKONFAULT area? I can see that being argued either way, and have no reason to disagree with current behaviour. 
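As an aside, the VM_LOCKONFAULT behaviour relied on above is easiest to see from userspace. A minimal sketch (illustration only, not part of the patch; it assumes a glibc recent enough to expose mlock2() and MLOCK_ONFAULT):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/*
	 * MLOCK_ONFAULT sets VM_LOCKED|VM_LOCKONFAULT on the vma but
	 * populates nothing: each page is mlocked only when first faulted.
	 * Plain mlock() faults the whole range in up front - and that
	 * population is all populate_vma_page_range() still needs to do,
	 * now that FOLL_MLOCK/FOLL_POPULATE carry no extra meaning.
	 */
	if (mlock2(p, len, MLOCK_ONFAULT)) {
		perror("mlock2");
		return EXIT_FAILURE;
	}

	p[0] = 1;	/* only this first page is faulted in and mlocked */
	printf("locked-on-fault range populated one page so far\n");
	return EXIT_SUCCESS;
}

Pages in the range are mlocked one by one as they are first touched, which is exactly why populate_vma_page_range() can return early for VM_LOCKONFAULT below.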
Signed-off-by: Hugh Dickins Acked-by: Vlastimil Babka Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/mm.h | 2 -- mm/gup.c | 43 ++++++++----------------------------------- mm/huge_memory.c | 33 --------------------------------- 3 files changed, 8 insertions(+), 70 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 213cc569b192..74ee50c2033b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2925,13 +2925,11 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO * and return without waiting upon it */ -#define FOLL_POPULATE 0x40 /* fault in pages (with FOLL_MLOCK) */ #define FOLL_NOFAULT 0x80 /* do not fault in pages */ #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ -#define FOLL_MLOCK 0x1000 /* lock present pages */ #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ #define FOLL_COW 0x4000 /* internal GUP flag */ #define FOLL_ANON 0x8000 /* don't do file mappings */ diff --git a/mm/gup.c b/mm/gup.c index a9d4d724aef7..87fec8a5c10d 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -597,32 +597,6 @@ retry: */ mark_page_accessed(page); } - if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { - /* Do not mlock pte-mapped THP */ - if (PageTransCompound(page)) - goto out; - - /* - * The preliminary mapping check is mainly to avoid the - * pointless overhead of lock_page on the ZERO_PAGE - * which might bounce very badly if there is contention. - * - * If the page is already locked, we don't need to - * handle it now - vmscan will handle it later if and - * when it attempts to reclaim the page. - */ - if (page->mapping && trylock_page(page)) { - lru_add_drain(); /* push cached pages to LRU */ - /* - * Because we lock page here, and migration is - * blocked by the pte's page reference, and we - * know the page is still mapped, we don't even - * need to check for file-cache page truncation. - */ - mlock_vma_page(page); - unlock_page(page); - } - } out: pte_unmap_unlock(ptep, ptl); return page; @@ -945,9 +919,6 @@ static int faultin_page(struct vm_area_struct *vma, unsigned int fault_flags = 0; vm_fault_t ret; - /* mlock all present pages, but do not fault in new pages */ - if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) - return -ENOENT; if (*flags & FOLL_NOFAULT) return -EFAULT; if (*flags & FOLL_WRITE) @@ -1198,8 +1169,6 @@ retry: case -ENOMEM: case -EHWPOISON: goto out; - case -ENOENT: - goto next_page; } BUG(); } else if (PTR_ERR(page) == -EEXIST) { @@ -1497,9 +1466,14 @@ long populate_vma_page_range(struct vm_area_struct *vma, VM_BUG_ON_VMA(end > vma->vm_end, vma); mmap_assert_locked(mm); - gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; + /* + * Rightly or wrongly, the VM_LOCKONFAULT case has never used + * faultin_page() to break COW, so it has no work to do here. 
+ */ if (vma->vm_flags & VM_LOCKONFAULT) - gup_flags &= ~FOLL_POPULATE; + return nr_pages; + + gup_flags = FOLL_TOUCH; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW @@ -1566,10 +1540,9 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, * in the page table. * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit * a poisoned page. - * FOLL_POPULATE: Always populate memory with VM_LOCKONFAULT. * !FOLL_FORCE: Require proper access permissions. */ - gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK | FOLL_HWPOISON; + gup_flags = FOLL_TOUCH | FOLL_HWPOISON; if (write) gup_flags |= FOLL_WRITE; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 406a3c28c026..9a34b85ebcf8 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1380,39 +1380,6 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, if (flags & FOLL_TOUCH) touch_pmd(vma, addr, pmd, flags); - if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { - /* - * We don't mlock() pte-mapped THPs. This way we can avoid - * leaking mlocked pages into non-VM_LOCKED VMAs. - * - * For anon THP: - * - * In most cases the pmd is the only mapping of the page as we - * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for - * writable private mappings in populate_vma_page_range(). - * - * The only scenario when we have the page shared here is if we - * mlocking read-only mapping shared over fork(). We skip - * mlocking such pages. - * - * For file THP: - * - * We can expect PageDoubleMap() to be stable under page lock: - * for file pages we set it in page_add_file_rmap(), which - * requires page to be locked. - */ - - if (PageAnon(page) && compound_mapcount(page) != 1) - goto skip_mlock; - if (PageDoubleMap(page) || !page->mapping) - goto skip_mlock; - if (!trylock_page(page)) - goto skip_mlock; - if (page->mapping && !PageDoubleMap(page)) - mlock_vma_page(page); - unlock_page(page); - } -skip_mlock: page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); -- cgit v1.2.3-71-gd317 From cea86fe246b694a191804b47378eb9d77aefabec Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Feb 2022 18:26:39 -0800 Subject: mm/munlock: rmap call mlock_vma_page() munlock_vma_page() Add vma argument to mlock_vma_page() and munlock_vma_page(), make them inline functions which check (vma->vm_flags & VM_LOCKED) before calling mlock_page() and munlock_page() in mm/mlock.c. Add bool compound to mlock_vma_page() and munlock_vma_page(): this is because we have understandable difficulty in accounting pte maps of THPs, and if passed a PageHead page, mlock_page() and munlock_page() cannot tell whether it's a pmd map to be counted or a pte map to be ignored. Add vma arg to page_add_file_rmap() and page_remove_rmap(), like the others, and use that to call mlock_vma_page() at the end of the page adds, and munlock_vma_page() at the end of page_remove_rmap() (end or beginning? unimportant, but end was easier for assertions in testing). No page lock is required (although almost all adds happen to hold it): delete the "Serialize with page migration" BUG_ON(!PageLocked(page))s. Certainly page lock did serialize with page migration, but I'm having difficulty explaining why that was ever important. 
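To make the new convention concrete, here is a condensed sketch of a pte-mapping site after this change (modelled on the insert_page_into_pte_locked() hunk in mm/memory.c below; kernel context assumed, the function name is illustrative and the caller holds the pte lock):

static int example_map_file_pte(struct vm_area_struct *vma, pte_t *pte,
				unsigned long addr, struct page *page,
				pgprot_t prot)
{
	if (!pte_none(*pte))
		return -EBUSY;		/* something is already mapped here */
	get_page(page);
	inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
	/*
	 * The rmap call now takes the vma, and ends by calling
	 * mlock_vma_page(page, vma, false) when the vma is VM_LOCKED.
	 */
	page_add_file_rmap(page, vma, false);
	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
	return 0;
}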
Mlock accounting on THPs has been hard to define, differed between anon and file, involved PageDoubleMap in some places and not others, required clear_page_mlock() at some points. Keep it simple now: just count the pmds and ignore the ptes, there is no reason for ptes to undo pmd mlocks. page_add_new_anon_rmap() callers unchanged: they have long been calling lru_cache_add_inactive_or_unevictable(), which does its own VM_LOCKED handling (it also checks for not VM_SPECIAL: I think that's overcautious, and inconsistent with other checks, that mmap_region() already prevents VM_LOCKED on VM_SPECIAL; but haven't quite convinced myself to change it). Signed-off-by: Hugh Dickins Acked-by: Vlastimil Babka Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/rmap.h | 17 ++++++++------- kernel/events/uprobes.c | 7 ++----- mm/huge_memory.c | 17 +++++++-------- mm/hugetlb.c | 4 ++-- mm/internal.h | 36 ++++++++++++++++++++++++++----- mm/khugepaged.c | 4 ++-- mm/ksm.c | 12 +---------- mm/memory.c | 45 +++++++++++++-------------------------- mm/migrate.c | 9 ++------ mm/mlock.c | 21 +++++++------------ mm/rmap.c | 56 +++++++++++++++++++++++-------------------------- mm/userfaultfd.c | 14 +++++++------ 12 files changed, 113 insertions(+), 129 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index dc48aa8c2c94..ac29b076082b 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -167,18 +167,19 @@ struct anon_vma *page_get_anon_vma(struct page *page); */ void page_move_anon_rmap(struct page *, struct vm_area_struct *); void page_add_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long, bool); + unsigned long address, bool compound); void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long, int); + unsigned long address, int flags); void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long, bool); -void page_add_file_rmap(struct page *, bool); -void page_remove_rmap(struct page *, bool); - + unsigned long address, bool compound); +void page_add_file_rmap(struct page *, struct vm_area_struct *, + bool compound); +void page_remove_rmap(struct page *, struct vm_area_struct *, + bool compound); void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long); + unsigned long address); void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long); + unsigned long address); static inline void page_dup_rmap(struct page *page, bool compound) { diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 6357c3580d07..eed2f7437d96 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -173,7 +173,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, return err; } - /* For try_to_free_swap() and munlock_vma_page() below */ + /* For try_to_free_swap() below */ lock_page(old_page); mmu_notifier_invalidate_range_start(&range); @@ -201,13 +201,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, set_pte_at_notify(mm, addr, pvmw.pte, mk_pte(new_page, vma->vm_page_prot)); - page_remove_rmap(old_page, false); + page_remove_rmap(old_page, vma, false); if (!page_mapped(old_page)) try_to_free_swap(old_page); page_vma_mapped_walk_done(&pvmw); - - if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page)) - munlock_vma_page(old_page); put_page(old_page); err = 0; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9a34b85ebcf8..d6477f48a27e 100644 --- a/mm/huge_memory.c +++ 
b/mm/huge_memory.c @@ -1577,7 +1577,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (pmd_present(orig_pmd)) { page = pmd_page(orig_pmd); - page_remove_rmap(page, true); + page_remove_rmap(page, vma, true); VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); VM_BUG_ON_PAGE(!PageHead(page), page); } else if (thp_migration_supported()) { @@ -1962,7 +1962,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, set_page_dirty(page); if (!PageReferenced(page) && pmd_young(old_pmd)) SetPageReferenced(page); - page_remove_rmap(page, true); + page_remove_rmap(page, vma, true); put_page(page); } add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); @@ -2096,6 +2096,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, } } unlock_page_memcg(page); + + /* Above is effectively page_remove_rmap(page, vma, true) */ + munlock_vma_page(page, vma, true); } smp_wmb(); /* make pte visible before pmd */ @@ -2103,7 +2106,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, if (freeze) { for (i = 0; i < HPAGE_PMD_NR; i++) { - page_remove_rmap(page + i, false); + page_remove_rmap(page + i, vma, false); put_page(page + i); } } @@ -2163,8 +2166,6 @@ repeat: do_unlock_page = true; } } - if (PageMlocked(page)) - clear_page_mlock(page); } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) goto out; __split_huge_pmd_locked(vma, pmd, range.start, freeze); @@ -3138,7 +3139,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, if (pmd_soft_dirty(pmdval)) pmdswp = pmd_swp_mksoft_dirty(pmdswp); set_pmd_at(mm, address, pvmw->pmd, pmdswp); - page_remove_rmap(page, true); + page_remove_rmap(page, vma, true); put_page(page); } @@ -3168,10 +3169,8 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) if (PageAnon(new)) page_add_anon_rmap(new, vma, mmun_start, true); else - page_add_file_rmap(new, true); + page_add_file_rmap(new, vma, true); set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); - if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new)) - mlock_vma_page(new); update_mmu_cache_pmd(vma, address, pvmw->pmd); } #endif diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 61895cc01d09..43fb3155298e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5014,7 +5014,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct set_page_dirty(page); hugetlb_count_sub(pages_per_huge_page(h), mm); - page_remove_rmap(page, true); + page_remove_rmap(page, vma, true); spin_unlock(ptl); tlb_remove_page_size(tlb, page, huge_page_size(h)); @@ -5259,7 +5259,7 @@ retry_avoidcopy: /* Break COW */ huge_ptep_clear_flush(vma, haddr, ptep); mmu_notifier_invalidate_range(mm, range.start, range.end); - page_remove_rmap(old_page, true); + page_remove_rmap(old_page, vma, true); hugepage_add_new_anon_rmap(new_page, vma, haddr); set_huge_pte_at(mm, haddr, ptep, make_huge_pte(vma, new_page, 1)); diff --git a/mm/internal.h b/mm/internal.h index f235aa92e564..3d7dfc8bc471 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -395,12 +395,35 @@ extern long faultin_vma_page_range(struct vm_area_struct *vma, bool write, int *locked); extern int mlock_future_check(struct mm_struct *mm, unsigned long flags, unsigned long len); - /* - * must be called with vma's mmap_lock held for read or write, and page locked. + * mlock_vma_page() and munlock_vma_page(): + * should be called with vma's mmap_lock held for read or write, + * under page table lock for the pte/pmd being added or removed. 
+ * + * mlock is usually called at the end of page_add_*_rmap(), + * munlock at the end of page_remove_rmap(); but new anon + * pages are managed in lru_cache_add_inactive_or_unevictable(). + * + * @compound is used to include pmd mappings of THPs, but filter out + * pte mappings of THPs, which cannot be consistently counted: a pte + * mapping of the THP head cannot be distinguished by the page alone. */ -extern void mlock_vma_page(struct page *page); -extern void munlock_vma_page(struct page *page); +void mlock_page(struct page *page); +static inline void mlock_vma_page(struct page *page, + struct vm_area_struct *vma, bool compound) +{ + if (unlikely(vma->vm_flags & VM_LOCKED) && + (compound || !PageTransCompound(page))) + mlock_page(page); +} +void munlock_page(struct page *page); +static inline void munlock_vma_page(struct page *page, + struct vm_area_struct *vma, bool compound) +{ + if (unlikely(vma->vm_flags & VM_LOCKED) && + (compound || !PageTransCompound(page))) + munlock_page(page); +} /* * Clear the page's PageMlocked(). This can be useful in a situation where @@ -487,7 +510,10 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, #else /* !CONFIG_MMU */ static inline void unmap_mapping_folio(struct folio *folio) { } static inline void clear_page_mlock(struct page *page) { } -static inline void mlock_vma_page(struct page *page) { } +static inline void mlock_vma_page(struct page *page, + struct vm_area_struct *vma, bool compound) { } +static inline void munlock_vma_page(struct page *page, + struct vm_area_struct *vma, bool compound) { } static inline void vunmap_range_noflush(unsigned long start, unsigned long end) { } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 131492fd1148..52add1825525 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -774,7 +774,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, */ spin_lock(ptl); ptep_clear(vma->vm_mm, address, _pte); - page_remove_rmap(src_page, false); + page_remove_rmap(src_page, vma, false); spin_unlock(ptl); free_page_and_swap_cache(src_page); } @@ -1513,7 +1513,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) if (pte_none(*pte)) continue; page = vm_normal_page(vma, addr, *pte); - page_remove_rmap(page, false); + page_remove_rmap(page, vma, false); } pte_unmap_unlock(start_pte, ptl); diff --git a/mm/ksm.c b/mm/ksm.c index c20bd4d9a0d9..c5a4403b5dc9 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1177,7 +1177,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, ptep_clear_flush(vma, addr, ptep); set_pte_at_notify(mm, addr, ptep, newpte); - page_remove_rmap(page, false); + page_remove_rmap(page, vma, false); if (!page_mapped(page)) try_to_free_swap(page); put_page(page); @@ -1252,16 +1252,6 @@ static int try_to_merge_one_page(struct vm_area_struct *vma, err = replace_page(vma, page, kpage, orig_pte); } - if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { - munlock_vma_page(page); - if (!PageMlocked(kpage)) { - unlock_page(page); - lock_page(kpage); - mlock_vma_page(kpage); - page = kpage; /* for final unlock */ - } - } - out_unlock: unlock_page(page); out: diff --git a/mm/memory.c b/mm/memory.c index c125c4969913..53bd9e5f2e33 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -735,9 +735,6 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, set_pte_at(vma->vm_mm, address, ptep, pte); - if (vma->vm_flags & VM_LOCKED) - mlock_vma_page(page); - /* * No need to invalidate - it was non-present before. 
However * secondary CPUs may have mappings that need invalidating. @@ -1377,7 +1374,7 @@ again: mark_page_accessed(page); } rss[mm_counter(page)]--; - page_remove_rmap(page, false); + page_remove_rmap(page, vma, false); if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); if (unlikely(__tlb_remove_page(tlb, page))) { @@ -1397,10 +1394,8 @@ again: continue; pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); rss[mm_counter(page)]--; - if (is_device_private_entry(entry)) - page_remove_rmap(page, false); - + page_remove_rmap(page, vma, false); put_page(page); continue; } @@ -1753,16 +1748,16 @@ static int validate_page_before_insert(struct page *page) return 0; } -static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte, +static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { if (!pte_none(*pte)) return -EBUSY; /* Ok, finally just insert the thing.. */ get_page(page); - inc_mm_counter_fast(mm, mm_counter_file(page)); - page_add_file_rmap(page, false); - set_pte_at(mm, addr, pte, mk_pte(page, prot)); + inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); + page_add_file_rmap(page, vma, false); + set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); return 0; } @@ -1776,7 +1771,6 @@ static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte, static int insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot) { - struct mm_struct *mm = vma->vm_mm; int retval; pte_t *pte; spinlock_t *ptl; @@ -1785,17 +1779,17 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, if (retval) goto out; retval = -ENOMEM; - pte = get_locked_pte(mm, addr, &ptl); + pte = get_locked_pte(vma->vm_mm, addr, &ptl); if (!pte) goto out; - retval = insert_page_into_pte_locked(mm, pte, addr, page, prot); + retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); pte_unmap_unlock(pte, ptl); out: return retval; } #ifdef pte_index -static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte, +static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { int err; @@ -1805,7 +1799,7 @@ static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte, err = validate_page_before_insert(page); if (err) return err; - return insert_page_into_pte_locked(mm, pte, addr, page, prot); + return insert_page_into_pte_locked(vma, pte, addr, page, prot); } /* insert_pages() amortizes the cost of spinlock operations @@ -1842,7 +1836,7 @@ more: start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { - int err = insert_page_in_batch_locked(mm, pte, + int err = insert_page_in_batch_locked(vma, pte, addr, pages[curr_page_idx], prot); if (unlikely(err)) { pte_unmap_unlock(start_pte, pte_lock); @@ -3098,7 +3092,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * mapcount is visible. So transitively, TLBs to * old page will be flushed before it can be reused. */ - page_remove_rmap(old_page, false); + page_remove_rmap(old_page, vma, false); } /* Free the old page.. */ @@ -3118,16 +3112,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) */ mmu_notifier_invalidate_range_only_end(&range); if (old_page) { - /* - * Don't let another task, with possibly unlocked vma, - * keep the mlocked page. 
- */ - if (page_copied && (vma->vm_flags & VM_LOCKED)) { - lock_page(old_page); /* LRU manipulation */ - if (PageMlocked(old_page)) - munlock_vma_page(old_page); - unlock_page(old_page); - } if (page_copied) free_swap_cache(old_page); put_page(old_page); @@ -3947,7 +3931,8 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); - page_add_file_rmap(page, true); + page_add_file_rmap(page, vma, true); + /* * deposit and withdraw with pmd lock held */ @@ -3996,7 +3981,7 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) lru_cache_add_inactive_or_unevictable(page, vma); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); - page_add_file_rmap(page, false); + page_add_file_rmap(page, vma, false); } set_pte_at(vma->vm_mm, addr, vmf->pte, entry); } diff --git a/mm/migrate.c b/mm/migrate.c index c7da064b4781..7c4223ce2500 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -248,14 +248,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, if (PageAnon(new)) page_add_anon_rmap(new, vma, pvmw.address, false); else - page_add_file_rmap(new, false); + page_add_file_rmap(new, vma, false); set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } - if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) - mlock_vma_page(new); - - if (PageTransHuge(page) && PageMlocked(page)) - clear_page_mlock(page); /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, pvmw.address, pvmw.pte); @@ -2331,7 +2326,7 @@ again: * drop page refcount. Page won't be freed, as we took * a reference just above. */ - page_remove_rmap(page, false); + page_remove_rmap(page, vma, false); put_page(page); if (pte_present(pte)) diff --git a/mm/mlock.c b/mm/mlock.c index 5d7ced8303be..92f28258b4ae 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -78,17 +78,13 @@ void clear_page_mlock(struct page *page) } } -/* - * Mark page as mlocked if not already. - * If page on LRU, isolate and putback to move to unevictable list. +/** + * mlock_page - mlock a page + * @page: page to be mlocked, either a normal page or a THP head. */ -void mlock_vma_page(struct page *page) +void mlock_page(struct page *page) { - /* Serialize with page migration */ - BUG_ON(!PageLocked(page)); - VM_BUG_ON_PAGE(PageTail(page), page); - VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); if (!TestSetPageMlocked(page)) { int nr_pages = thp_nr_pages(page); @@ -101,14 +97,11 @@ void mlock_vma_page(struct page *page) } /** - * munlock_vma_page - munlock a vma page - * @page: page to be unlocked, either a normal page or THP page head + * munlock_page - munlock a page + * @page: page to be munlocked, either a normal page or a THP head. 
*/ -void munlock_vma_page(struct page *page) +void munlock_page(struct page *page) { - /* Serialize with page migration */ - BUG_ON(!PageLocked(page)); - VM_BUG_ON_PAGE(PageTail(page), page); if (TestClearPageMlocked(page)) { diff --git a/mm/rmap.c b/mm/rmap.c index 7ce7f1946cff..6cc8bf129f18 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1181,17 +1181,17 @@ void do_page_add_anon_rmap(struct page *page, __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); } - if (unlikely(PageKsm(page))) { + if (unlikely(PageKsm(page))) unlock_page_memcg(page); - return; - } /* address might be in next vma when migration races vma_adjust */ - if (first) + else if (first) __page_set_anon_rmap(page, vma, address, flags & RMAP_EXCLUSIVE); else __page_check_anon_rmap(page, vma, address); + + mlock_vma_page(page, vma, compound); } /** @@ -1232,12 +1232,14 @@ void page_add_new_anon_rmap(struct page *page, /** * page_add_file_rmap - add pte mapping to a file page - * @page: the page to add the mapping to - * @compound: charge the page as compound or small page + * @page: the page to add the mapping to + * @vma: the vm area in which the mapping is added + * @compound: charge the page as compound or small page * * The caller needs to hold the pte lock. */ -void page_add_file_rmap(struct page *page, bool compound) +void page_add_file_rmap(struct page *page, + struct vm_area_struct *vma, bool compound) { int i, nr = 1; @@ -1260,13 +1262,8 @@ void page_add_file_rmap(struct page *page, bool compound) nr_pages); } else { if (PageTransCompound(page) && page_mapping(page)) { - struct page *head = compound_head(page); - VM_WARN_ON_ONCE(!PageLocked(page)); - - SetPageDoubleMap(head); - if (PageMlocked(page)) - clear_page_mlock(head); + SetPageDoubleMap(compound_head(page)); } if (!atomic_inc_and_test(&page->_mapcount)) goto out; @@ -1274,6 +1271,8 @@ void page_add_file_rmap(struct page *page, bool compound) __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); out: unlock_page_memcg(page); + + mlock_vma_page(page, vma, compound); } static void page_remove_file_rmap(struct page *page, bool compound) @@ -1368,11 +1367,13 @@ static void page_remove_anon_compound_rmap(struct page *page) /** * page_remove_rmap - take down pte mapping from a page * @page: page to remove mapping from + * @vma: the vm area from which the mapping is removed * @compound: uncharge the page as compound or small page * * The caller needs to hold the pte lock. */ -void page_remove_rmap(struct page *page, bool compound) +void page_remove_rmap(struct page *page, + struct vm_area_struct *vma, bool compound) { lock_page_memcg(page); @@ -1414,6 +1415,8 @@ void page_remove_rmap(struct page *page, bool compound) */ out: unlock_page_memcg(page); + + munlock_vma_page(page, vma, compound); } /* @@ -1469,28 +1472,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { + /* Unexpected PMD-mapped THP? */ + VM_BUG_ON_PAGE(!pvmw.pte, page); + /* - * If the page is mlock()d, we cannot swap it out. + * If the page is in an mlock()d vma, we must not swap it out. */ if (!(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) { - /* - * PTE-mapped THP are never marked as mlocked: so do - * not set it on a DoubleMap THP, nor on an Anon THP - * (which may still be PTE-mapped after DoubleMap was - * cleared). But stop unmapping even in those cases. 
- */ - if (!PageTransCompound(page) || (PageHead(page) && - !PageDoubleMap(page) && !PageAnon(page))) - mlock_vma_page(page); + /* Restore the mlock which got missed */ + mlock_vma_page(page, vma, false); page_vma_mapped_walk_done(&pvmw); ret = false; break; } - /* Unexpected PMD-mapped THP? */ - VM_BUG_ON_PAGE(!pvmw.pte, page); - subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); address = pvmw.address; @@ -1668,7 +1664,7 @@ discard: * * See Documentation/vm/mmu_notifier.rst */ - page_remove_rmap(subpage, PageHuge(page)); + page_remove_rmap(subpage, vma, PageHuge(page)); put_page(page); } @@ -1942,7 +1938,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, * * See Documentation/vm/mmu_notifier.rst */ - page_remove_rmap(subpage, PageHuge(page)); + page_remove_rmap(subpage, vma, PageHuge(page)); put_page(page); } @@ -2078,7 +2074,7 @@ static bool page_make_device_exclusive_one(struct page *page, * There is a reference on the page for the swap entry which has * been removed, so shouldn't take another. */ - page_remove_rmap(subpage, false); + page_remove_rmap(subpage, vma, false); } mmu_notifier_invalidate_range_end(&range); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 0780c2a57ff1..15d3e97a6e04 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -95,10 +95,15 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, if (!pte_none(*dst_pte)) goto out_unlock; - if (page_in_cache) - page_add_file_rmap(page, false); - else + if (page_in_cache) { + /* Usually, cache pages are already added to LRU */ + if (newly_allocated) + lru_cache_add(page); + page_add_file_rmap(page, dst_vma, false); + } else { page_add_new_anon_rmap(page, dst_vma, dst_addr, false); + lru_cache_add_inactive_or_unevictable(page, dst_vma); + } /* * Must happen after rmap, as mm_counter() checks mapping (via @@ -106,9 +111,6 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, */ inc_mm_counter(dst_mm, mm_counter(page)); - if (newly_allocated) - lru_cache_add_inactive_or_unevictable(page, dst_vma); - set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); /* No need to invalidate - it was non-present before */ -- cgit v1.2.3-71-gd317 From 07ca760673088f262da57ff42c15558688565aa2 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 14 Feb 2022 18:29:54 -0800 Subject: mm/munlock: maintain page->mlock_count while unevictable Previous patches have been preparatory: now implement page->mlock_count. The ordering of the "Unevictable LRU" is of no significance, and there is no point holding unevictable pages on a list: place page->mlock_count to overlay page->lru.prev (since page->lru.next is overlaid by compound_head, which needs to be even so as not to satisfy PageTail - though 2 could be added instead of 1 for each mlock, if that's ever an improvement). But it's only safe to rely on or modify page->mlock_count while lruvec lock is held and page is on unevictable "LRU" - we can save lots of edits by continuing to pretend that there's an imaginary LRU here (there is an unevictable count which still needs to be maintained, but not a list). The mlock_count technique suffers from an unreliability much like with page_mlock(): while someone else has the page off LRU, not much can be done. 
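To make the overlay concrete before going further, a hedged userspace model of the layout constraint just described (field names mirror the mm_types.h hunk below; struct page_model is an illustration, not the kernel struct):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* stand-in for the kernel's struct list_head */
struct list_head { struct list_head *next, *prev; };

/* model of the union this patch adds to struct page and struct folio */
struct page_model {
	union {
		struct list_head lru;
		struct {
			/* overlays lru.next, i.e. the compound_head slot:
			 * must stay even so the page never looks PageTail */
			void *__filler;
			/* overlays the start of lru.prev */
			unsigned int mlock_count;
		};
	};
};

int main(void)
{
	static_assert(offsetof(struct page_model, mlock_count) ==
		      offsetof(struct page_model, lru.prev),
		      "mlock_count must overlay lru.prev");
	printf("mlock_count overlays lru.prev at offset %zu\n",
	       offsetof(struct page_model, mlock_count));
	return 0;
}

The point is that mlock_count only exists while the lru fields are otherwise unused, i.e. while the page sits on the imaginary unevictable list with lruvec lock held.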
As before, err on the safe side (behave as if mlock_count 0), and let try_to_unlock_one() move the page to unevictable if reclaim finds out later on - a few misplaced pages don't matter, what we want to avoid is imbalancing reclaim by flooding evictable lists with unevictable pages. I am not a fan of "if (!isolate_lru_page(page)) putback_lru_page(page);": if we have taken lruvec lock to get the page off its present list, then we save everyone trouble (and however many extra atomic ops) by putting it on its destination list immediately. Signed-off-by: Hugh Dickins Acked-by: Vlastimil Babka Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/mm_inline.h | 11 +++++--- include/linux/mm_types.h | 19 +++++++++++-- mm/huge_memory.c | 5 +++- mm/memcontrol.c | 3 +-- mm/mlock.c | 68 +++++++++++++++++++++++++++++++++++++---------- mm/mmzone.c | 7 +++++ mm/swap.c | 1 + 7 files changed, 92 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index b725839dfe71..884d6f6af05b 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -99,7 +99,8 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio) update_lru_size(lruvec, lru, folio_zonenum(folio), folio_nr_pages(folio)); - list_add(&folio->lru, &lruvec->lists[lru]); + if (lru != LRU_UNEVICTABLE) + list_add(&folio->lru, &lruvec->lists[lru]); } static __always_inline void add_page_to_lru_list(struct page *page, @@ -115,6 +116,7 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio) update_lru_size(lruvec, lru, folio_zonenum(folio), folio_nr_pages(folio)); + /* This is not expected to be used on LRU_UNEVICTABLE */ list_add_tail(&folio->lru, &lruvec->lists[lru]); } @@ -127,8 +129,11 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page, static __always_inline void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio) { - list_del(&folio->lru); - update_lru_size(lruvec, folio_lru_list(folio), folio_zonenum(folio), + enum lru_list lru = folio_lru_list(folio); + + if (lru != LRU_UNEVICTABLE) + list_del(&folio->lru); + update_lru_size(lruvec, lru, folio_zonenum(folio), -folio_nr_pages(folio)); } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5140e5feb486..475bdb282769 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -85,7 +85,16 @@ struct page { * lruvec->lru_lock. Sometimes used as a generic list * by the page owner. */ - struct list_head lru; + union { + struct list_head lru; + /* Or, for the Unevictable "LRU list" slot */ + struct { + /* Always even, to negate PageTail */ + void *__filler; + /* Count page's or folio's mlocks */ + unsigned int mlock_count; + }; + }; /* See page-flags.h for PAGE_MAPPING_FLAGS */ struct address_space *mapping; pgoff_t index; /* Our offset within mapping. 
*/ @@ -241,7 +250,13 @@ struct folio { struct { /* public: */ unsigned long flags; - struct list_head lru; + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + }; struct address_space *mapping; pgoff_t index; void *private; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d6477f48a27e..9afca0122723 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2300,8 +2300,11 @@ static void lru_add_page_tail(struct page *head, struct page *tail, } else { /* head is still on lru (and we have it frozen) */ VM_WARN_ON(!PageLRU(head)); + if (PageUnevictable(tail)) + tail->mlock_count = 0; + else + list_add_tail(&tail->lru, &head->lru); SetPageLRU(tail); - list_add_tail(&tail->lru, &head->lru); } } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 36e9f38c919d..c78b9d3b9c04 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1257,8 +1257,7 @@ struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, * @nr_pages: positive when adding or negative when removing * * This function must be called under lru_lock, just before a page is added - * to or just after a page is removed from an lru list (that ordering being - * so as to allow it to check that lru_size 0 is consistent with list_empty). + * to or just after a page is removed from an lru list. */ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, int zid, int nr_pages) diff --git a/mm/mlock.c b/mm/mlock.c index 3c26473050a3..f8a3a54687dd 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -54,16 +54,35 @@ EXPORT_SYMBOL(can_do_mlock); */ void mlock_page(struct page *page) { + struct lruvec *lruvec; + int nr_pages = thp_nr_pages(page); + VM_BUG_ON_PAGE(PageTail(page), page); if (!TestSetPageMlocked(page)) { - int nr_pages = thp_nr_pages(page); - mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages); - count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); - if (!isolate_lru_page(page)) - putback_lru_page(page); + __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); + } + + /* There is nothing more we can do while it's off LRU */ + if (!TestClearPageLRU(page)) + return; + + lruvec = folio_lruvec_lock_irq(page_folio(page)); + if (PageUnevictable(page)) { + page->mlock_count++; + goto out; } + + del_page_from_lru_list(page, lruvec); + ClearPageActive(page); + SetPageUnevictable(page); + page->mlock_count = 1; + add_page_to_lru_list(page, lruvec); + __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages); +out: + SetPageLRU(page); + unlock_page_lruvec_irq(lruvec); } /** @@ -72,19 +91,40 @@ void mlock_page(struct page *page) */ void munlock_page(struct page *page) { + struct lruvec *lruvec; + int nr_pages = thp_nr_pages(page); + VM_BUG_ON_PAGE(PageTail(page), page); + lock_page_memcg(page); + lruvec = folio_lruvec_lock_irq(page_folio(page)); + if (PageLRU(page) && PageUnevictable(page)) { + /* Then mlock_count is maintained, but might undercount */ + if (page->mlock_count) + page->mlock_count--; + if (page->mlock_count) + goto out; + } + /* else assume that was the last mlock: reclaim will fix it if not */ + if (TestClearPageMlocked(page)) { - int nr_pages = thp_nr_pages(page); - - mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); - if (!isolate_lru_page(page)) { - putback_lru_page(page); - count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages); - } else if (PageUnevictable(page)) { - count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages); - } + __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); + if (PageLRU(page) || !PageUnevictable(page)) + 
__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages); + else + __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages); + } + + /* page_evictable() has to be checked *after* clearing Mlocked */ + if (PageLRU(page) && PageUnevictable(page) && page_evictable(page)) { + del_page_from_lru_list(page, lruvec); + ClearPageUnevictable(page); + add_page_to_lru_list(page, lruvec); + __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages); } +out: + unlock_page_lruvec_irq(lruvec); + unlock_page_memcg(page); } /* diff --git a/mm/mmzone.c b/mm/mmzone.c index eb89d6e018e2..40e1d9428300 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c @@ -81,6 +81,13 @@ void lruvec_init(struct lruvec *lruvec) for_each_lru(lru) INIT_LIST_HEAD(&lruvec->lists[lru]); + /* + * The "Unevictable LRU" is imaginary: though its size is maintained, + * it is never scanned, and unevictable pages are not threaded on it + * (so that their lru fields can be reused to hold mlock_count). + * Poison its list head, so that any operations on it would crash. + */ + list_del(&lruvec->lists[LRU_UNEVICTABLE]); } #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) diff --git a/mm/swap.c b/mm/swap.c index ff4810e4a4bc..682a03301a2c 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1062,6 +1062,7 @@ static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec) } else { folio_clear_active(folio); folio_set_unevictable(folio); + folio->mlock_count = !!folio_test_mlocked(folio); if (!was_unevictable) __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages); } -- cgit v1.2.3-71-gd317 From 904b10fb189cc15376e9bfce1ef0282e68b0b004 Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Mon, 14 Feb 2022 12:41:08 +0100 Subject: PCI: Add defines for normal and subtractive PCI bridges MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add these PCI class codes to pci_ids.h: PCI_CLASS_BRIDGE_PCI_NORMAL PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE Use these defines in all kernel code for describing PCI class codes for normal and subtractive PCI bridges. 
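The relationship between the old 16-bit define and the new 24-bit class codes can be checked with a trivial standalone program (values copied from the pci_ids.h hunk below; illustration only):

#include <assert.h>
#include <stdio.h>

#define PCI_CLASS_BRIDGE_PCI			0x0604	 /* base class + subclass */
#define PCI_CLASS_BRIDGE_PCI_NORMAL		0x060400 /* + prog-if 00h */
#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE	0x060401 /* + prog-if 01h */

int main(void)
{
	/* the full 24-bit class code is (base << 16 | sub << 8 | prog-if),
	 * i.e. the old 16-bit define shifted past the prog-if byte */
	static_assert((PCI_CLASS_BRIDGE_PCI << 8 | 0x00) ==
		      PCI_CLASS_BRIDGE_PCI_NORMAL, "normal decode");
	static_assert((PCI_CLASS_BRIDGE_PCI << 8 | 0x01) ==
		      PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, "subtractive decode");

	printf("normal: %06x subtractive: %06x\n",
	       PCI_CLASS_BRIDGE_PCI_NORMAL, PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE);
	return 0;
}

In other words, the new defines simply fold the programming interface byte (00h normal, 01h subtractive) into the (PCI_CLASS_BRIDGE_PCI << 8) value that call sites were open-coding.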
[bhelgaas: similar change in pci-mvebu.c] Link: https://lore.kernel.org/r/20220214114109.26809-1-pali@kernel.org Signed-off-by: Pali Rohár Signed-off-by: Bjorn Helgaas --- arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h | 2 -- arch/mips/pci/fixup-sb1250.c | 2 +- arch/mips/pci/pci-bcm63xx.c | 2 +- arch/powerpc/platforms/powernv/pci.c | 2 +- arch/powerpc/sysdev/fsl_pci.c | 2 +- arch/sh/drivers/pci/pcie-sh7786.c | 2 +- drivers/pci/controller/dwc/pci-keystone.c | 8 ++++---- drivers/pci/controller/dwc/pci-meson.c | 16 ++++++++-------- drivers/pci/controller/dwc/pcie-qcom.c | 2 +- drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 2 +- drivers/pci/controller/pci-aardvark.c | 2 +- drivers/pci/controller/pci-loongson.c | 2 +- drivers/pci/controller/pci-mvebu.c | 2 +- drivers/pci/controller/pci-tegra.c | 2 +- drivers/pci/controller/pcie-iproc-bcma.c | 2 +- drivers/pci/controller/pcie-iproc.c | 2 +- drivers/pci/controller/pcie-mediatek-gen3.c | 2 +- drivers/pci/controller/pcie-rcar-host.c | 2 +- drivers/pci/controller/pcie-rockchip-host.c | 2 +- drivers/pci/controller/pcie-rockchip.h | 1 - drivers/pci/hotplug/shpchp_core.c | 2 +- drivers/pci/pci-bridge-emul.c | 8 +++++--- drivers/pci/pcie/portdrv_pci.c | 4 ++-- include/linux/pci_ids.h | 2 ++ 24 files changed, 38 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 9ceb5e72889f..d3f397dcab6e 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -1380,8 +1380,6 @@ #define PCIE_IDVAL3_REG 0x43c #define IDVAL3_CLASS_CODE_MASK 0xffffff -#define IDVAL3_SUBCLASS_SHIFT 8 -#define IDVAL3_CLASS_SHIFT 16 #define PCIE_DLSTATUS_REG 0x1048 #define DLSTATUS_PHYLINKUP (1 << 13) diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c index 40efc990cdce..3f914c33b7de 100644 --- a/arch/mips/pci/fixup-sb1250.c +++ b/arch/mips/pci/fixup-sb1250.c @@ -75,7 +75,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI, */ static void quirk_sb1250_ht(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT, quirk_sb1250_ht); diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c index 5548365605c0..ac83243772d2 100644 --- a/arch/mips/pci/pci-bcm63xx.c +++ b/arch/mips/pci/pci-bcm63xx.c @@ -186,7 +186,7 @@ static int __init bcm63xx_register_pcie(void) /* setup class code as bridge */ val = bcm_pcie_readl(PCIE_IDVAL3_REG); val &= ~IDVAL3_CLASS_CODE_MASK; - val |= (PCI_CLASS_BRIDGE_PCI << IDVAL3_SUBCLASS_SHIFT); + val |= PCI_CLASS_BRIDGE_PCI_NORMAL; bcm_pcie_writel(val, PCIE_IDVAL3_REG); /* disable bar1 size */ diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 9a8391b983d1..f7054879ecd4 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -815,7 +815,7 @@ void pnv_pci_shutdown(void) /* Fixup wrong class code in p7ioc and p8 root complex */ static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk); diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 674f047b7820..a97ce602394e 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ 
b/arch/powerpc/sysdev/fsl_pci.c @@ -55,7 +55,7 @@ static void quirk_fsl_pcie_early(struct pci_dev *dev) if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) return; - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; fsl_pcie_bus_fixup = 1; return; } diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c index 4d499476c33a..b0c2a5238d04 100644 --- a/arch/sh/drivers/pci/pcie-sh7786.c +++ b/arch/sh/drivers/pci/pcie-sh7786.c @@ -314,7 +314,7 @@ static int __init pcie_init(struct sh7786_pcie_port *port) * class to match. Hardware takes care of propagating the IDSETR * settings, so there is no need to bother with a quirk. */ - pci_write_reg(chan, PCI_CLASS_BRIDGE_PCI << 16, SH4A_PCIEIDSETR1); + pci_write_reg(chan, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, SH4A_PCIEIDSETR1); /* Initialize default capabilities. */ data = pci_read_reg(chan, SH4A_PCIEEXPCAP0); diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 1c2ee4e13f1c..d10e5fd0f83c 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -531,13 +531,13 @@ static void ks_pcie_quirk(struct pci_dev *dev) struct pci_dev *bridge; static const struct pci_device_id rc_pci_devids[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), - .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E), - .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), - .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G), - .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, }, { 0, }, }; diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index 686ded034f22..f44bf347904a 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -313,14 +313,14 @@ static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, * cannot program the PCI_CLASS_DEVICE register, so we must fabricate * the return value in the config accessors. 
*/ - if (where == PCI_CLASS_REVISION && size == 4) - *val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff); - else if (where == PCI_CLASS_DEVICE && size == 2) - *val = PCI_CLASS_BRIDGE_PCI; - else if (where == PCI_CLASS_DEVICE && size == 1) - *val = PCI_CLASS_BRIDGE_PCI & 0xff; - else if (where == PCI_CLASS_DEVICE + 1 && size == 1) - *val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff; + if ((where & ~3) == PCI_CLASS_REVISION) { + if (size <= 2) + *val = (*val & ((1 << (size * 8)) - 1)) << (8 * (where & 3)); + *val &= ~0xffffff00; + *val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; + if (size <= 2) + *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + } return PCIBIOS_SUCCESSFUL; } diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index c19cd506ed3f..a47f1c0434c2 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -1634,7 +1634,7 @@ static const struct of_device_id qcom_pcie_match[] = { static void qcom_fixup_class(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index f3547aa60140..31a7bdebe540 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -295,7 +295,7 @@ int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit) /* fixup for PCIe class register */ value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS); value &= 0xff; - value |= (PCI_CLASS_BRIDGE_PCI << 16); + value |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS); return 0; diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 4f5b44827d21..2561326e9181 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -529,7 +529,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) */ reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG); reg &= ~0xffffff00; - reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8; + reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG); /* Disable Root Bridge I/O space, memory space and bus mastering */ diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 48169b1e3817..50a8e1d6f70a 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -35,7 +35,7 @@ struct loongson_pci { /* Fixup wrong class code in PCIe bridges */ static void bridge_class_quirk(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_PCIE_PORT_0, bridge_class_quirk); diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 71258ea3d35f..b0ec1c640df7 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -268,7 +268,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) */ dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF); dev_rev &= ~0xffffff00; - dev_rev |= (PCI_CLASS_BRIDGE_PCI << 8) << 8; + dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF); /* Point PCIe unit MBUS decode windows to DRAM space. 
*/ diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index cb0aa65d6934..0457ec02ab70 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -726,7 +726,7 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port) /* Tegra PCIE root complex wrongly reports device class */ static void tegra_pcie_fixup_class(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c index 54b6e6d5bc64..99a99900444d 100644 --- a/drivers/pci/controller/pcie-iproc-bcma.c +++ b/drivers/pci/controller/pcie-iproc-bcma.c @@ -18,7 +18,7 @@ /* NS: CLASS field is R/O, and set to wrong 0x200 value */ static void bcma_pcie2_fixup_class(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index b3e75bc61ff1..3df4ab209253 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -1581,7 +1581,7 @@ static void quirk_paxc_bridge(struct pci_dev *pdev) * code that the bridge is not an Ethernet device. */ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) - pdev->class = PCI_CLASS_BRIDGE_PCI << 8; + pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; /* * MPSS is not being set properly (as it is currently 0). This is diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 7705d61fba4c..3e8d70bfabc6 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -292,7 +292,7 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) /* Set class code */ val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1); val &= ~GENMASK(31, 8); - val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8); + val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL); writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1); /* Mask all INTx interrupts */ diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 38b6e02edfa9..dfca59c4ae34 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -370,7 +370,7 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie) * class to match. Hardware takes care of propagating the IDSETR * settings, so there is no need to bother with a quirk. 
*/ - rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); + rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1); /* * Setup Secondary Bus Number & Subordinate Bus Number, even though diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 45a28880f322..7f56f99b4116 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -370,7 +370,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, PCIE_CORE_CONFIG_VENDOR); rockchip_pcie_write(rockchip, - PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, + PCI_CLASS_BRIDGE_PCI_NORMAL << 8, PCIE_RC_CONFIG_RID_CCR); /* Clear THP cap's next cap pointer to remove L1 substate cap */ diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 1650a5087450..32c3a859c26b 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -134,7 +134,6 @@ #define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 #define PCIE_RC_CONFIG_BASE 0xa00000 #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) -#define PCIE_RC_CONFIG_SCC_SHIFT 16 #define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) #define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 #define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 81a918d47895..53692b048301 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -312,7 +312,7 @@ static void shpc_remove(struct pci_dev *dev) } static const struct pci_device_id shpcd_pci_tbl[] = { - {PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0)}, + {PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL, ~0)}, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, shpcd_pci_tbl); diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index c994ebec2360..ec6ab03ae476 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -328,10 +328,12 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END); /* - * class_revision: Class is high 24 bits and revision is low 8 bit of this member, - * while class for PCI Bridge Normal Decode has the 24-bit value: PCI_CLASS_BRIDGE_PCI << 8 + * class_revision: Class is high 24 bits and revision is low 8 bit + * of this member, while class for PCI Bridge Normal Decode has the + * 24-bit value: PCI_CLASS_BRIDGE_PCI_NORMAL */ - bridge->conf.class_revision |= cpu_to_le32((PCI_CLASS_BRIDGE_PCI << 8) << 8); + bridge->conf.class_revision |= + cpu_to_le32(PCI_CLASS_BRIDGE_PCI_NORMAL << 8); bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE; bridge->conf.cache_line_size = 0x10; bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST); diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 35eca6277a96..4b8801656ffb 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -178,9 +178,9 @@ static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev) */ static const struct pci_device_id port_pci_ids[] = { /* handle any PCI-Express port */ - { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) }, + { PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL, ~0) }, /* subtractive decode PCI-to-PCI bridge, class type is 060401h */ - { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) }, + { 
PCI_DEVICE_CLASS(PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, ~0) }, /* handle any Root Complex Event Collector */ { PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) }, { }, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index aad54c666407..130949c3b486 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -60,6 +60,8 @@ #define PCI_CLASS_BRIDGE_EISA 0x0602 #define PCI_CLASS_BRIDGE_MC 0x0603 #define PCI_CLASS_BRIDGE_PCI 0x0604 +#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400 +#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE 0x060401 #define PCI_CLASS_BRIDGE_PCMCIA 0x0605 #define PCI_CLASS_BRIDGE_NUBUS 0x0606 #define PCI_CLASS_BRIDGE_CARDBUS 0x0607 -- cgit v1.2.3-71-gd317 From 091296d30917f6e63a9402974f546412dfb2a8e6 Mon Sep 17 00:00:00 2001 From: Miri Korenblit Date: Sat, 5 Feb 2022 11:21:36 +0200 Subject: iwlwifi: mvm: refactor setting PPE thresholds in STA_HE_CTXT_CMD We are setting the PPE Thresholds in STA_HE_CTXT_CMD according to the HE PHY Capabilities IE. As EHT is introduced, we will have to set these thresholds according to the EHT PHY Capabilities IE if we're in an EHT connection. Some parts of the code can be used for both HE and EHT. Put these parts in functions which will be used in the patch which adds support for EHT PPE. Signed-off-by: Miri Korenblit Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/iwlwifi.20220205112029.48a508dfffef.If392e44d88f96ebed7fadf827e327194d4bd97b1@changeid Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 204 ++++++++++++---------- include/linux/ieee80211.h | 1 + 2 files changed, 114 insertions(+), 91 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 1d74e8b3576a..8b0124a40ee9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -2086,6 +2086,103 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) return res; } +static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, + struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, + u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit) +{ + int i; + + /* + * FW currently supports only nss == MAX_HE_SUPP_NSS + * + * If nss > MAX: we can ignore values we don't support + * If nss < MAX: we can set zeros in other streams + */ + if (nss > MAX_HE_SUPP_NSS) { + IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, + MAX_HE_SUPP_NSS); + nss = MAX_HE_SUPP_NSS; + } + + for (i = 0; i < nss; i++) { + u8 ru_index_tmp = ru_index_bitmap << 1; + u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE; + u8 bw; + + for (bw = 0; + bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + ru_index_tmp >>= 1; + + if (!(ru_index_tmp & 1)) + continue; + + high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); + ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; + low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); + ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; + + pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; + } + } +} + +static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_he_pkt_ext_v2 *pkt_ext) +{ + u8 nss =
(sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; + u8 *ppe = &sta->he_cap.ppe_thres[0]; + u8 ru_index_bitmap = + u8_get_bits(*ppe, + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); + /* Starting after PPE header */ + u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; + + iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit); +} + +static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, + u8 nominal_padding, + u32 *flags) +{ + int low_th = -1; + int high_th = -1; + int i; + + switch (nominal_padding) { + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: + low_th = IWL_HE_PKT_EXT_BPSK; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_BPSK; + break; + } + + /* Set the PPE thresholds accordingly */ + if (low_th >= 0 && high_th >= 0) { + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + u8 bw; + + for (bw = 0; + bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; + } + } + + *flags |= STA_CTXT_HE_PACKET_EXT; + } +} + static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { @@ -2195,100 +2292,25 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, * Initialize the PPE thresholds to "None" (7), as described in Table * 9-262ac of 80211.ax/D3.0. */ - memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext)); + memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, + sizeof(sta_ctxt_cmd.pkt_ext)); /* If PPE Thresholds exist, parse them into a FW-familiar format. 
*/ if (sta->he_cap.he_cap_elem.phy_cap_info[6] & - IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { - u8 nss = (sta->he_cap.ppe_thres[0] & - IEEE80211_PPE_THRES_NSS_MASK) + 1; - u8 ru_index_bitmap = - (sta->he_cap.ppe_thres[0] & - IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> - IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; - u8 *ppe = &sta->he_cap.ppe_thres[0]; - u8 ppe_pos_bit = 7; /* Starting after PPE header */ - - /* - * FW currently supports only nss == MAX_HE_SUPP_NSS - * - * If nss > MAX: we can ignore values we don't support - * If nss < MAX: we can set zeros in other streams - */ - if (nss > MAX_HE_SUPP_NSS) { - IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, - MAX_HE_SUPP_NSS); - nss = MAX_HE_SUPP_NSS; - } - - for (i = 0; i < nss; i++) { - u8 ru_index_tmp = ru_index_bitmap << 1; - u8 bw; - - for (bw = 0; - bw < ARRAY_SIZE(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i]); - bw++) { - ru_index_tmp >>= 1; - if (!(ru_index_tmp & 1)) - continue; - - sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] = - iwl_mvm_he_get_ppe_val(ppe, - ppe_pos_bit); - ppe_pos_bit += - IEEE80211_PPE_THRES_INFO_PPET_SIZE; - sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] = - iwl_mvm_he_get_ppe_val(ppe, - ppe_pos_bit); - ppe_pos_bit += - IEEE80211_PPE_THRES_INFO_PPET_SIZE; - } - } - + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, + &sta_ctxt_cmd.pkt_ext); flags |= STA_CTXT_HE_PACKET_EXT; - } else if (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], - IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK) - != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) { - int low_th = -1; - int high_th = -1; - - /* Take the PPE thresholds from the nominal padding info */ - switch (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], - IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) { - case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: - low_th = IWL_HE_PKT_EXT_NONE; - high_th = IWL_HE_PKT_EXT_NONE; - break; - case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: - low_th = IWL_HE_PKT_EXT_BPSK; - high_th = IWL_HE_PKT_EXT_NONE; - break; - case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: - low_th = IWL_HE_PKT_EXT_NONE; - high_th = IWL_HE_PKT_EXT_BPSK; - break; - } - - /* Set the PPE thresholds accordingly */ - if (low_th >= 0 && high_th >= 0) { - struct iwl_he_pkt_ext_v2 *pkt_ext = - &sta_ctxt_cmd.pkt_ext; - - for (i = 0; i < MAX_HE_SUPP_NSS; i++) { - u8 bw; - - for (bw = 0; - bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); - bw++) { - pkt_ext->pkt_ext_qam_th[i][bw][0] = - low_th; - pkt_ext->pkt_ext_qam_th[i][bw][1] = - high_th; - } - } - - flags |= STA_CTXT_HE_PACKET_EXT; - } + /* PPE Thresholds don't exist - set the API PPE values + * according to the Common Nominal Packet Padding field.
*/ + } else { + u8 nominal_padding = + u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); + if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) + iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, + nominal_padding, + &flags); } if (sta->he_cap.he_cap_elem.mac_cap_info[2] & diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a4143466cb32..75d40acb60c1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2421,6 +2421,7 @@ ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap) #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78 #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3) #define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3) +#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE (7) /* * Calculate 802.11ax HE capabilities IE PPE field size -- cgit v1.2.3-71-gd317 From ec87cf3782f7b05ae15e061d36c763c35488f292 Mon Sep 17 00:00:00 2001 From: Sergey Shtylyov Date: Wed, 2 Feb 2022 23:58:21 +0300 Subject: ata: libata: make ata_host_suspend() *void* MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ata_host_suspend() always returns 0, so the result checks in many drivers look pointless. Let's make this function return *void* instead of *int*. Found by Linux Verification Center (linuxtesting.org) with the SVACE static analysis tool. Signed-off-by: Sergey Shtylyov Acked-by: Uwe Kleine-König Reviewed-by: Hannes Reinecke Signed-off-by: Damien Le Moal --- drivers/ata/ahci.c | 3 ++- drivers/ata/ata_piix.c | 5 +---- drivers/ata/libahci_platform.c | 3 ++- drivers/ata/libata-core.c | 8 ++------ drivers/ata/pata_arasan_cf.c | 3 ++- drivers/ata/pata_cs5520.c | 5 +---- drivers/ata/pata_imx.c | 15 ++++++--------- drivers/ata/pata_macio.c | 6 +----- drivers/ata/pata_mpc52xx.c | 3 ++- drivers/ata/pata_samsung_cf.c | 3 ++- drivers/ata/pata_triflex.c | 5 +---- drivers/ata/sata_fsl.c | 4 +++- drivers/ata/sata_highbank.c | 3 ++- drivers/ata/sata_mv.c | 6 +++--- drivers/ata/sata_rcar.c | 18 ++++++++---------- include/linux/libata.h | 2 +- 16 files changed, 39 insertions(+), 53 deletions(-) (limited to 'include/linux') diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index edca4e8fd44e..51045fd05243 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -891,7 +891,8 @@ static int ahci_pci_device_suspend(struct device *dev) } ahci_pci_disable_interrupts(host); - return ata_host_suspend(host, PMSG_SUSPEND); + ata_host_suspend(host, PMSG_SUSPEND); + return 0; } static int ahci_pci_device_resume(struct device *dev) diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 27b0d903f91f..ade5e894563b 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -993,11 +993,8 @@ static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = pci_get_drvdata(pdev); unsigned long flags; - int rc = 0; - rc = ata_host_suspend(host, mesg); - if (rc) - return rc; + ata_host_suspend(host, mesg); /* Some braindamaged ACPI suspend implementations expect the * controller to be awake on entry; otherwise, it burns cpu diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 18296443ccba..65227ef6b846 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -733,7 +733,8 @@ int ahci_platform_suspend_host(struct device *dev) if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS) ahci_platform_disable_phys(hpriv); - return ata_host_suspend(host, 
PMSG_SUSPEND); + ata_host_suspend(host, PMSG_SUSPEND); + return 0; } EXPORT_SYMBOL_GPL(ahci_platform_suspend_host); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3ceda1fa243d..0ddb49f2ece1 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5170,10 +5170,9 @@ EXPORT_SYMBOL_GPL(ata_sas_port_resume); * * Suspend @host. Actual operation is performed by port suspend. */ -int ata_host_suspend(struct ata_host *host, pm_message_t mesg) +void ata_host_suspend(struct ata_host *host, pm_message_t mesg) { host->dev->power.power_state = mesg; - return 0; } EXPORT_SYMBOL_GPL(ata_host_suspend); @@ -6090,11 +6089,8 @@ EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = pci_get_drvdata(pdev); - int rc = 0; - rc = ata_host_suspend(host, mesg); - if (rc) - return rc; + ata_host_suspend(host, mesg); ata_pci_device_do_suspend(pdev, mesg); diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 24c3d5e1fca3..e89617ed9175 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -937,7 +937,8 @@ static int arasan_cf_suspend(struct device *dev) dmaengine_terminate_all(acdev->dma_chan); cf_exit(acdev); - return ata_host_suspend(host, PMSG_SUSPEND); + ata_host_suspend(host, PMSG_SUSPEND); + return 0; } static int arasan_cf_resume(struct device *dev) diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 24ce8665b1f9..f4289a532f87 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -259,11 +259,8 @@ static int cs5520_reinit_one(struct pci_dev *pdev) static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = pci_get_drvdata(pdev); - int rc = 0; - rc = ata_host_suspend(host, mesg); - if (rc) - return rc; + ata_host_suspend(host, mesg); pci_save_state(pdev); return 0; diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c index 2e538726802b..150939275b1b 100644 --- a/drivers/ata/pata_imx.c +++ b/drivers/ata/pata_imx.c @@ -223,17 +223,14 @@ static int pata_imx_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct pata_imx_priv *priv = host->private_data; - int ret; - ret = ata_host_suspend(host, PMSG_SUSPEND); - if (!ret) { - __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); - priv->ata_ctl = - __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL); - clk_disable_unprepare(priv->clk); - } + ata_host_suspend(host, PMSG_SUSPEND); - return ret; + __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); + priv->ata_ctl = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL); + clk_disable_unprepare(priv->clk); + + return 0; } static int pata_imx_resume(struct device *dev) diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 16e8aa184a75..3c3509f18cdd 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -853,12 +853,8 @@ static int pata_macio_slave_config(struct scsi_device *sdev) #ifdef CONFIG_PM_SLEEP static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg) { - int rc; - /* First, core libata suspend to do most of the work */ - rc = ata_host_suspend(priv->host, mesg); - if (rc) - return rc; + ata_host_suspend(priv->host, mesg); /* Restore to default timings */ pata_macio_default_timings(priv); diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index f1d352d5f128..c1b138d24b05 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c 
@@ -824,7 +824,8 @@ mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state) { struct ata_host *host = platform_get_drvdata(op); - return ata_host_suspend(host, state); + ata_host_suspend(host, state); + return 0; } static int diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index 3da0e8e30286..377cd6bd87ac 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c @@ -608,7 +608,8 @@ static int pata_s3c_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); - return ata_host_suspend(host, PMSG_SUSPEND); + ata_host_suspend(host, PMSG_SUSPEND); + return 0; } static int pata_s3c_resume(struct device *dev) diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index 8a033598e7e1..782162d2f3f8 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c @@ -198,11 +198,8 @@ static const struct pci_device_id triflex[] = { static int triflex_ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = pci_get_drvdata(pdev); - int rc = 0; - rc = ata_host_suspend(host, mesg); - if (rc) - return rc; + ata_host_suspend(host, mesg); /* * We must not disable or powerdown the device. diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index da0152116d9f..cfe93c420488 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1546,7 +1546,9 @@ static int sata_fsl_remove(struct platform_device *ofdev) static int sata_fsl_suspend(struct platform_device *op, pm_message_t state) { struct ata_host *host = platform_get_drvdata(op); - return ata_host_suspend(host, state); + + ata_host_suspend(host, state); + return 0; } static int sata_fsl_resume(struct platform_device *op) diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index b29d3f1d64b0..077bce76c445 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -587,7 +587,8 @@ static int ahci_highbank_suspend(struct device *dev) writel(ctl, mmio + HOST_CTL); readl(mmio + HOST_CTL); /* flush */ - return ata_host_suspend(host, PMSG_SUSPEND); + ata_host_suspend(host, PMSG_SUSPEND); + return 0; } static int ahci_highbank_resume(struct device *dev) diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 53446b997740..3b2a0d228306 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -4235,10 +4235,10 @@ static int mv_platform_remove(struct platform_device *pdev) static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state) { struct ata_host *host = platform_get_drvdata(pdev); + if (host) - return ata_host_suspend(host, state); - else - return 0; + ata_host_suspend(host, state); + return 0; } static int mv_platform_resume(struct platform_device *pdev) diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 3d96b6faa3f0..be8bfefed663 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -945,19 +945,17 @@ static int sata_rcar_suspend(struct device *dev) struct ata_host *host = dev_get_drvdata(dev); struct sata_rcar_priv *priv = host->private_data; void __iomem *base = priv->base; - int ret; - ret = ata_host_suspend(host, PMSG_SUSPEND); - if (!ret) { - /* disable interrupts */ - iowrite32(0, base + ATAPI_INT_ENABLE_REG); - /* mask */ - iowrite32(priv->sataint_mask, base + SATAINTMASK_REG); + ata_host_suspend(host, PMSG_SUSPEND); - pm_runtime_put(dev); - } + /* disable interrupts */ + iowrite32(0, base + ATAPI_INT_ENABLE_REG); + /* mask */ + iowrite32(priv->sataint_mask, base + SATAINTMASK_REG); - return ret; 
+ pm_runtime_put(dev); + + return 0; } static int sata_rcar_resume(struct device *dev) diff --git a/include/linux/libata.h b/include/linux/libata.h index 605756f645be..a49e75c4206d 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1080,7 +1080,7 @@ extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, extern bool ata_link_online(struct ata_link *link); extern bool ata_link_offline(struct ata_link *link); #ifdef CONFIG_PM -extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); +extern void ata_host_suspend(struct ata_host *host, pm_message_t mesg); extern void ata_host_resume(struct ata_host *host); extern void ata_sas_port_suspend(struct ata_port *ap); extern void ata_sas_port_resume(struct ata_port *ap); -- cgit v1.2.3-71-gd317 From 47f0bd5032106469827cf56c8b45bb9101112105 Mon Sep 17 00:00:00 2001 From: Jacques de Laval Date: Thu, 17 Feb 2022 16:02:02 +0100 Subject: net: Add new protocol attribute to IP addresses This patch adds a new protocol attribute to IPv4 and IPv6 addresses. Inspiration was taken from the protocol attribute of routes. User space applications like iproute2 can set/get the protocol with the Netlink API. The attribute is stored as an 8-bit unsigned integer. The protocol attribute is set by the kernel for these categories: - IPv4 and IPv6 loopback addresses - IPv6 addresses generated from router announcements - IPv6 link local addresses User space may pass custom protocols, not defined by the kernel. Grouping addresses on their origin is useful in scenarios where you want to distinguish between addresses based on who added them, e.g. kernel vs. user space. Tagging addresses with a string label is an existing feature that could be used as a solution. Unfortunately the max length of a label is 15 characters, and for compatibility reasons the label must be prefixed with the name of the device followed by a colon. Since device names also have a max length of 15 characters, a worst-case device name leaves 15 - 15 - 1 = -1 characters guaranteed to be available for any origin tag, which is no room at all.
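For illustration only (this sketch is not part of the patch; the "eth0" device, the 192.0.2.1 address, and the 0x42 protocol value are made up, and error handling is omitted), user space could tag a new IPv4 address with a custom protocol through raw rtnetlink roughly like this:

/* Hypothetical user-space sketch: add 192.0.2.1/24 on "eth0" and tag it
 * with a custom origin value via the new IFA_PROTO attribute.
 */
#include <arpa/inet.h>
#include <linux/if_addr.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void put_attr(struct nlmsghdr *nlh, unsigned short type,
		     const void *data, unsigned short len)
{
	struct rtattr *rta = (struct rtattr *)((char *)nlh +
					       NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	char buf[256] = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct in_addr addr;
	unsigned char proto = 0x42;	/* custom, user-defined origin tag */
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*ifm));
	nlh->nlmsg_type = RTM_NEWADDR;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;

	ifm->ifa_family = AF_INET;
	ifm->ifa_prefixlen = 24;
	ifm->ifa_index = if_nametoindex("eth0");

	inet_pton(AF_INET, "192.0.2.1", &addr);
	put_attr(nlh, IFA_LOCAL, &addr, sizeof(addr));
	put_attr(nlh, IFA_PROTO, &proto, sizeof(proto));	/* new u8 attr */

	sendto(fd, nlh, nlh->nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	close(fd);
	return 0;
}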
A reference implementation of user space setting and getting protocols is available for iproute2: https://github.com/westermo/iproute2/commit/9a6ea18bd79f47f293e5edc7780f315ea42ff540 Signed-off-by: Jacques de Laval Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20220217150202.80802-1-Jacques.De.Laval@westermo.com Signed-off-by: Jakub Kicinski --- include/linux/inetdevice.h | 1 + include/net/addrconf.h | 2 ++ include/net/if_inet6.h | 2 ++ include/uapi/linux/if_addr.h | 9 ++++++++- net/ipv4/devinet.c | 7 +++++++ net/ipv6/addrconf.c | 27 +++++++++++++++++++++------ 6 files changed, 41 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 674aeead6260..ead323243e7b 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -150,6 +150,7 @@ struct in_ifaddr { __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; + unsigned char ifa_proto; __u32 ifa_flags; char ifa_label[IFNAMSIZ]; diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 59940e230b78..f7506f08e505 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -64,6 +64,8 @@ struct ifa6_config { const struct in6_addr *pfx; unsigned int plen; + u8 ifa_proto; + const struct in6_addr *peer_pfx; u32 rt_priority; diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index f026cf08a8e8..4cfdef6ca4f6 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -71,6 +71,8 @@ struct inet6_ifaddr { bool tokenized; + u8 ifa_proto; + struct rcu_head rcu; struct in6_addr peer_addr; }; diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h index dfcf3ce0097f..1c392dd95a5e 100644 --- a/include/uapi/linux/if_addr.h +++ b/include/uapi/linux/if_addr.h @@ -33,8 +33,9 @@ enum { IFA_CACHEINFO, IFA_MULTICAST, IFA_FLAGS, - IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */ + IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */ IFA_TARGET_NETNSID, + IFA_PROTO, /* u8, address protocol */ __IFA_MAX, }; @@ -69,4 +70,10 @@ struct ifa_cacheinfo { #define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg)) #endif +/* ifa_proto */ +#define IFAPROT_UNSPEC 0 +#define IFAPROT_KERNEL_LO 1 /* loopback */ +#define IFAPROT_KERNEL_RA 2 /* set by kernel from router announcement */ +#define IFAPROT_KERNEL_LL 3 /* link-local set by kernel */ + #endif diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index fba2bffd65f7..53a6b14dc50a 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -104,6 +104,7 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = { [IFA_FLAGS] = { .type = NLA_U32 }, [IFA_RT_PRIORITY] = { .type = NLA_U32 }, [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, + [IFA_PROTO] = { .type = NLA_U8 }, }; struct inet_fill_args { @@ -889,6 +890,9 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh, if (tb[IFA_RT_PRIORITY]) ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]); + if (tb[IFA_PROTO]) + ifa->ifa_proto = nla_get_u8(tb[IFA_PROTO]); + if (tb[IFA_CACHEINFO]) { struct ifa_cacheinfo *ci; @@ -1625,6 +1629,7 @@ static size_t inet_nlmsg_size(void) + nla_total_size(4) /* IFA_BROADCAST */ + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ + nla_total_size(4) /* IFA_FLAGS */ + + nla_total_size(1) /* IFA_PROTO */ + nla_total_size(4) /* IFA_RT_PRIORITY */ + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */ } @@ -1699,6 +1704,8 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, 
nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) || (ifa->ifa_label[0] && nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) || + (ifa->ifa_proto && + nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) || nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) || (ifa->ifa_rt_priority && nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) || diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2f307da17f21..85bab3a35709 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1115,6 +1115,7 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, ifa->prefix_len = cfg->plen; ifa->rt_priority = cfg->rt_priority; ifa->flags = cfg->ifa_flags; + ifa->ifa_proto = cfg->ifa_proto; /* No need to add the TENTATIVE flag for addresses with NODAD */ if (!(cfg->ifa_flags & IFA_F_NODAD)) ifa->flags |= IFA_F_TENTATIVE; @@ -2593,6 +2594,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, .valid_lft = valid_lft, .preferred_lft = prefered_lft, .scope = addr_type & IPV6_ADDR_SCOPE_MASK, + .ifa_proto = IFAPROT_KERNEL_RA }; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD @@ -3077,7 +3079,7 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg) } static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, - int plen, int scope) + int plen, int scope, u8 proto) { struct inet6_ifaddr *ifp; struct ifa6_config cfg = { @@ -3086,7 +3088,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, .ifa_flags = IFA_F_PERMANENT, .valid_lft = INFINITY_LIFE_TIME, .preferred_lft = INFINITY_LIFE_TIME, - .scope = scope + .scope = scope, + .ifa_proto = proto }; ifp = ipv6_add_addr(idev, &cfg, true, NULL); @@ -3131,7 +3134,7 @@ static void add_v4_addrs(struct inet6_dev *idev) } if (addr.s6_addr32[3]) { - add_addr(idev, &addr, plen, scope); + add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC); addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, GFP_KERNEL); return; @@ -3154,7 +3157,8 @@ static void add_v4_addrs(struct inet6_dev *idev) flag |= IFA_HOST; } - add_addr(idev, &addr, plen, flag); + add_addr(idev, &addr, plen, flag, + IFAPROT_UNSPEC); addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, GFP_KERNEL); } @@ -3177,7 +3181,7 @@ static void init_loopback(struct net_device *dev) return; } - add_addr(idev, &in6addr_loopback, 128, IFA_HOST); + add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO); } void addrconf_add_linklocal(struct inet6_dev *idev, @@ -3189,7 +3193,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev, .ifa_flags = flags | IFA_F_PERMANENT, .valid_lft = INFINITY_LIFE_TIME, .preferred_lft = INFINITY_LIFE_TIME, - .scope = IFA_LINK + .scope = IFA_LINK, + .ifa_proto = IFAPROT_KERNEL_LL }; struct inet6_ifaddr *ifp; @@ -4627,6 +4632,7 @@ static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = { [IFA_FLAGS] = { .len = sizeof(u32) }, [IFA_RT_PRIORITY] = { .len = sizeof(u32) }, [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, + [IFA_PROTO] = { .type = NLA_U8 }, }; static int @@ -4752,6 +4758,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp, ifp->tstamp = jiffies; ifp->valid_lft = cfg->valid_lft; ifp->prefered_lft = cfg->preferred_lft; + ifp->ifa_proto = cfg->ifa_proto; if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority) ifp->rt_priority = cfg->rt_priority; @@ -4845,6 +4852,9 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[IFA_RT_PRIORITY]) cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]); + if (tb[IFA_PROTO]) + cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]); + 
cfg.valid_lft = INFINITY_LIFE_TIME; cfg.preferred_lft = INFINITY_LIFE_TIME; @@ -4948,6 +4958,7 @@ static inline int inet6_ifaddr_msgsize(void) + nla_total_size(16) /* IFA_ADDRESS */ + nla_total_size(sizeof(struct ifa_cacheinfo)) + nla_total_size(4) /* IFA_FLAGS */ + + nla_total_size(1) /* IFA_PROTO */ + nla_total_size(4) /* IFA_RT_PRIORITY */; } @@ -5025,6 +5036,10 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0) goto error; + if (ifa->ifa_proto && + nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) + goto error; + nlmsg_end(skb, nlh); return 0; -- cgit v1.2.3-71-gd317 From b1e8206582f9d680cff7d04828708c8b6ab32957 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Feb 2022 10:16:57 +0100 Subject: sched: Fix yet more sched_fork() races Where commit 4ef0c5c6b5ba ("kernel/sched: Fix sched_fork() access an invalid sched_task_group") fixed a fork race vs cgroup, it opened up a race vs syscalls by not placing the task on the runqueue before it gets exposed through the pidhash. Commit 13765de8148f ("sched/fair: Fix fault in reweight_entity") tried to fix a single instance of this; instead, fix the whole class of issues, effectively reverting commit 4ef0c5c6b5ba. Fixes: 4ef0c5c6b5ba ("kernel/sched: Fix sched_fork() access an invalid sched_task_group") Reported-by: Linus Torvalds Signed-off-by: Peter Zijlstra (Intel) Tested-by: Tadeusz Struk Tested-by: Zhang Qiao Tested-by: Dietmar Eggemann Link: https://lkml.kernel.org/r/YgoeCbwj5mbCR0qA@hirez.programming.kicks-ass.net --- include/linux/sched/task.h | 4 ++-- kernel/fork.c | 13 ++++++++++++- kernel/sched/core.c | 34 +++++++++++++++++++++------------- 3 files changed, 35 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index b9198a1b3a84..e84e54d1b490 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -54,8 +54,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); extern int sched_fork(unsigned long clone_flags, struct task_struct *p); -extern void sched_post_fork(struct task_struct *p, - struct kernel_clone_args *kargs); +extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs); +extern void sched_post_fork(struct task_struct *p); extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); diff --git a/kernel/fork.c b/kernel/fork.c index d75a528f7b21..c607d238fc23 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2266,6 +2266,17 @@ static __latent_entropy struct task_struct *copy_process( if (retval) goto bad_fork_put_pidfd; + /* + * Now that the cgroups are pinned, re-clone the parent cgroup and put + * the new task on the correct runqueue. All this *before* the task + * becomes visible. + * + * This isn't part of ->can_fork() because while the re-cloning is + * cgroup specific, it unconditionally needs to place the task on a + * runqueue. + */ + sched_cgroup_fork(p, args); + /* * From this point on we must avoid any synchronous user-space * communication until we take the tasklist-lock.
In particular, we do @@ -2376,7 +2387,7 @@ static __latent_entropy struct task_struct *copy_process( write_unlock_irq(&tasklist_lock); proc_fork_connector(p); - sched_post_fork(p, args); + sched_post_fork(p); cgroup_post_fork(p, args); perf_event_fork(p); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fcf0c180617c..9745613d531c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1214,9 +1214,8 @@ int tg_nop(struct task_group *tg, void *data) } #endif -static void set_load_weight(struct task_struct *p) +static void set_load_weight(struct task_struct *p, bool update_load) { - bool update_load = !(READ_ONCE(p->__state) & TASK_NEW); int prio = p->static_prio - MAX_RT_PRIO; struct load_weight *load = &p->se.load; @@ -4407,7 +4406,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->static_prio = NICE_TO_PRIO(0); p->prio = p->normal_prio = p->static_prio; - set_load_weight(p); + set_load_weight(p, false); /* * We don't need the reset flag anymore after the fork. It has @@ -4425,6 +4424,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) init_entity_runnable_average(&p->se); + #ifdef CONFIG_SCHED_INFO if (likely(sched_info_on())) memset(&p->sched_info, 0, sizeof(p->sched_info)); @@ -4440,18 +4440,23 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) return 0; } -void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs) +void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) { unsigned long flags; -#ifdef CONFIG_CGROUP_SCHED - struct task_group *tg; -#endif + /* + * Because we're not yet on the pid-hash, p->pi_lock isn't strictly + * required yet, but lockdep gets upset if rules are violated. + */ raw_spin_lock_irqsave(&p->pi_lock, flags); #ifdef CONFIG_CGROUP_SCHED - tg = container_of(kargs->cset->subsys[cpu_cgrp_id], - struct task_group, css); - p->sched_task_group = autogroup_task_group(p, tg); + if (1) { + struct task_group *tg; + tg = container_of(kargs->cset->subsys[cpu_cgrp_id], + struct task_group, css); + tg = autogroup_task_group(p, tg); + p->sched_task_group = tg; + } #endif rseq_migrate(p); /* @@ -4462,7 +4467,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs) if (p->sched_class->task_fork) p->sched_class->task_fork(p); raw_spin_unlock_irqrestore(&p->pi_lock, flags); +} +void sched_post_fork(struct task_struct *p) +{ uclamp_post_fork(p); } @@ -6922,7 +6930,7 @@ void set_user_nice(struct task_struct *p, long nice) put_prev_task(rq, p); p->static_prio = NICE_TO_PRIO(nice); - set_load_weight(p); + set_load_weight(p, true); old_prio = p->prio; p->prio = effective_prio(p); @@ -7213,7 +7221,7 @@ static void __setscheduler_params(struct task_struct *p, */ p->rt_priority = attr->sched_priority; p->normal_prio = normal_prio(p); - set_load_weight(p); + set_load_weight(p, true); } /* @@ -9446,7 +9454,7 @@ void __init sched_init(void) #endif } - set_load_weight(&init_task); + set_load_weight(&init_task, false); /* * The boot idle thread does lazy MMU switching as well: -- cgit v1.2.3-71-gd317 From 8a69fe0be143b0a1af829f85f0e9a1ae7d6a04db Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 14 Feb 2022 16:52:11 +0000 Subject: sched/preempt: Refactor sched_dynamic_update() Currently sched_dynamic_update needs to open-code the enabled/disabled function names for each preemption model it supports, when in practice this is a boolean enabled/disabled state for each function. 
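A stand-alone illustration of the token-pasting idiom the patch builds on (foo, dynamic_enable(), and dynamic_disable() are invented here purely for illustration; they are not the kernel's names) follows; the boolean state lives entirely in which of the two per-function aliases gets used:

/* Toy model of the f##_dynamic_{enabled,disabled} naming convention. */
#include <stdio.h>

static void foo_real(void) { puts("foo is enabled"); }

/* Each function f gets two aliases describing its enabled/disabled state. */
#define foo_dynamic_enabled	foo_real
#define foo_dynamic_disabled	NULL

/* Stand-ins for static_call_update(f, f##_dynamic_{enabled,disabled}). */
#define dynamic_enable(f) \
	do { void (*__t)(void) = f##_dynamic_enabled; if (__t) __t(); } while (0)
#define dynamic_disable(f) \
	do { void (*__t)(void) = f##_dynamic_disabled; if (__t) __t(); } while (0)

int main(void)
{
	dynamic_enable(foo);	/* resolves to foo_real() and calls it */
	dynamic_disable(foo);	/* resolves to NULL: the call is skipped */
	return 0;
}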
Make this clearer and avoid repetition by defining the enabled/disabled states at the function definition, and using helper macros to perform the static_call_update(). Where x86 currently overrides the enabled function, it is made to provide both the enabled and disabled states for consistency, with defaults provided by the core code otherwise. In subsequent patches this will allow us to support PREEMPT_DYNAMIC without static calls. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Signed-off-by: Peter Zijlstra (Intel) Acked-by: Ard Biesheuvel Acked-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20220214165216.2231574-3-mark.rutland@arm.com --- arch/x86/include/asm/preempt.h | 10 ++++--- include/linux/entry-common.h | 2 ++ kernel/sched/core.c | 59 ++++++++++++++++++++++++++---------------- 3 files changed, 45 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index fe5efbcba824..5f6daea1ee24 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -108,16 +108,18 @@ static __always_inline bool should_resched(int preempt_offset) extern asmlinkage void preempt_schedule(void); extern asmlinkage void preempt_schedule_thunk(void); -#define __preempt_schedule_func preempt_schedule_thunk +#define preempt_schedule_dynamic_enabled preempt_schedule_thunk +#define preempt_schedule_dynamic_disabled NULL extern asmlinkage void preempt_schedule_notrace(void); extern asmlinkage void preempt_schedule_notrace_thunk(void); -#define __preempt_schedule_notrace_func preempt_schedule_notrace_thunk +#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace_thunk +#define preempt_schedule_notrace_dynamic_disabled NULL #ifdef CONFIG_PREEMPT_DYNAMIC -DECLARE_STATIC_CALL(preempt_schedule, __preempt_schedule_func); +DECLARE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); #define __preempt_schedule() \ do { \ @@ -125,7 +127,7 @@ do { \ asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \ } while (0) -DECLARE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func); +DECLARE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled); #define __preempt_schedule_notrace() \ do { \ diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 2e2b8d6140ed..a01ac1a0a292 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -456,6 +456,8 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs); */ void irqentry_exit_cond_resched(void); #ifdef CONFIG_PREEMPT_DYNAMIC +#define irqentry_exit_cond_resched_dynamic_enabled irqentry_exit_cond_resched +#define irqentry_exit_cond_resched_dynamic_disabled NULL DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched); #endif diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a123ffa8e21c..bf3a97f48c1d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6491,7 +6491,11 @@ NOKPROBE_SYMBOL(preempt_schedule); EXPORT_SYMBOL(preempt_schedule); #ifdef CONFIG_PREEMPT_DYNAMIC -DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func); +#ifndef preempt_schedule_dynamic_enabled +#define preempt_schedule_dynamic_enabled preempt_schedule +#define preempt_schedule_dynamic_disabled NULL +#endif +DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); EXPORT_STATIC_CALL_TRAMP(preempt_schedule); #endif @@ -6549,7 +6553,11 @@ 
asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) EXPORT_SYMBOL_GPL(preempt_schedule_notrace); #ifdef CONFIG_PREEMPT_DYNAMIC -DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func); +#ifndef preempt_schedule_notrace_dynamic_enabled +#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace +#define preempt_schedule_notrace_dynamic_disabled NULL +#endif +DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled); EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace); #endif @@ -8060,9 +8068,13 @@ EXPORT_SYMBOL(__cond_resched); #endif #ifdef CONFIG_PREEMPT_DYNAMIC +#define cond_resched_dynamic_enabled __cond_resched +#define cond_resched_dynamic_disabled ((void *)&__static_call_return0) DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); EXPORT_STATIC_CALL_TRAMP(cond_resched); +#define might_resched_dynamic_enabled __cond_resched +#define might_resched_dynamic_disabled ((void *)&__static_call_return0) DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); EXPORT_STATIC_CALL_TRAMP(might_resched); #endif @@ -8192,43 +8204,46 @@ int sched_dynamic_mode(const char *str) return -EINVAL; } +#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) +#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) + void sched_dynamic_update(int mode) { /* * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in * the ZERO state, which is invalid. */ - static_call_update(cond_resched, __cond_resched); - static_call_update(might_resched, __cond_resched); - static_call_update(preempt_schedule, __preempt_schedule_func); - static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func); - static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched); + preempt_dynamic_enable(cond_resched); + preempt_dynamic_enable(might_resched); + preempt_dynamic_enable(preempt_schedule); + preempt_dynamic_enable(preempt_schedule_notrace); + preempt_dynamic_enable(irqentry_exit_cond_resched); switch (mode) { case preempt_dynamic_none: - static_call_update(cond_resched, __cond_resched); - static_call_update(might_resched, (void *)&__static_call_return0); - static_call_update(preempt_schedule, NULL); - static_call_update(preempt_schedule_notrace, NULL); - static_call_update(irqentry_exit_cond_resched, NULL); + preempt_dynamic_enable(cond_resched); + preempt_dynamic_disable(might_resched); + preempt_dynamic_disable(preempt_schedule); + preempt_dynamic_disable(preempt_schedule_notrace); + preempt_dynamic_disable(irqentry_exit_cond_resched); pr_info("Dynamic Preempt: none\n"); break; case preempt_dynamic_voluntary: - static_call_update(cond_resched, __cond_resched); - static_call_update(might_resched, __cond_resched); - static_call_update(preempt_schedule, NULL); - static_call_update(preempt_schedule_notrace, NULL); - static_call_update(irqentry_exit_cond_resched, NULL); + preempt_dynamic_enable(cond_resched); + preempt_dynamic_enable(might_resched); + preempt_dynamic_disable(preempt_schedule); + preempt_dynamic_disable(preempt_schedule_notrace); + preempt_dynamic_disable(irqentry_exit_cond_resched); pr_info("Dynamic Preempt: voluntary\n"); break; case preempt_dynamic_full: - static_call_update(cond_resched, (void *)&__static_call_return0); - static_call_update(might_resched, (void *)&__static_call_return0); - static_call_update(preempt_schedule, __preempt_schedule_func); - static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func); - 
static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched); + preempt_dynamic_disable(cond_resched); + preempt_dynamic_disable(might_resched); + preempt_dynamic_enable(preempt_schedule); + preempt_dynamic_enable(preempt_schedule_notrace); + preempt_dynamic_enable(irqentry_exit_cond_resched); pr_info("Dynamic Preempt: full\n"); break; } -- cgit v1.2.3-71-gd317 From 4624a14f4daa8ab4578d274555fd8847254ce339 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 14 Feb 2022 16:52:12 +0000 Subject: sched/preempt: Simplify irqentry_exit_cond_resched() callers Currently callers of irqentry_exit_cond_resched() need to be aware of whether the function should be indirected via a static call, leading to ugly ifdeffery in callers. Save them the hassle with a static inline wrapper that does the right thing. The raw_irqentry_exit_cond_resched() will also be useful in subsequent patches which will add conditional wrappers for preemption functions. Note: in arch/x86/entry/common.c, xen_pv_evtchn_do_upcall() always calls irqentry_exit_cond_resched() directly, even when PREEMPT_DYNAMIC is in use. I believe this is a latent bug (which this patch corrects), but I'm not entirely certain this wasn't deliberate. Signed-off-by: Mark Rutland Signed-off-by: Peter Zijlstra (Intel) Acked-by: Ard Biesheuvel Acked-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20220214165216.2231574-4-mark.rutland@arm.com --- include/linux/entry-common.h | 9 ++++++--- kernel/entry/common.c | 12 ++++-------- 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index a01ac1a0a292..dfd84c59b144 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -454,11 +454,14 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs); * * Conditional reschedule with additional sanity checks. 
*/ -void irqentry_exit_cond_resched(void); +void raw_irqentry_exit_cond_resched(void); #ifdef CONFIG_PREEMPT_DYNAMIC -#define irqentry_exit_cond_resched_dynamic_enabled irqentry_exit_cond_resched +#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched #define irqentry_exit_cond_resched_dynamic_disabled NULL -DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched); +DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched); +#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)() +#else +#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched() #endif /** diff --git a/kernel/entry/common.c b/kernel/entry/common.c index bad713684c2e..1739ca79613b 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -380,7 +380,7 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs) return ret; } -void irqentry_exit_cond_resched(void) +void raw_irqentry_exit_cond_resched(void) { if (!preempt_count()) { /* Sanity check RCU and thread stack */ @@ -392,7 +392,7 @@ void irqentry_exit_cond_resched(void) } } #ifdef CONFIG_PREEMPT_DYNAMIC -DEFINE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched); +DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched); #endif noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state) @@ -420,13 +420,9 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state) } instrumentation_begin(); - if (IS_ENABLED(CONFIG_PREEMPTION)) { -#ifdef CONFIG_PREEMPT_DYNAMIC - static_call(irqentry_exit_cond_resched)(); -#else + if (IS_ENABLED(CONFIG_PREEMPTION)) irqentry_exit_cond_resched(); -#endif - } + /* Covers both tracing and lockdep */ trace_hardirqs_on(); instrumentation_end(); -- cgit v1.2.3-71-gd317 From 99cf983cc8bca4adb461b519664c939a565cfd4d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 14 Feb 2022 16:52:14 +0000 Subject: sched/preempt: Add PREEMPT_DYNAMIC using static keys Where an architecture selects HAVE_STATIC_CALL but not HAVE_STATIC_CALL_INLINE, each static call has an out-of-line trampoline which will either branch to a callee or return to the caller. On such architectures, a number of constraints can conspire to make those trampolines more complicated and potentially less useful than we'd like. For example: * Hardware and software control flow integrity schemes can require the addition of "landing pad" instructions (e.g. `BTI` for arm64), which will also be present at the "real" callee. * Limited branch ranges can require that trampolines generate or load an address into a register and perform an indirect branch (or at least have a slow path that does so). This loses some of the benefits of having a direct branch. * Interaction with SW CFI schemes can be complicated and fragile, e.g. requiring that we can recognise idiomatic codegen and remove indirections, at least until clang provides more helpful mechanisms for dealing with this. For PREEMPT_DYNAMIC, we don't need the full power of static calls, as we really only need to enable/disable specific preemption functions. We can achieve the same effect without a number of the pain points above by using static keys to fold early returns into the preemption functions themselves rather than in an out-of-line trampoline, effectively inlining the trampoline into the start of the function. For arm64, this results in good code generation. For example, the dynamic_cond_resched() wrapper looks as follows when enabled.
When disabled, the first `B` is replaced with a `NOP`, resulting in an early return.

| <dynamic_cond_resched>:
| bti c
| b <dynamic_cond_resched+0x10> // or `nop`
| mov w0, #0x0
| ret
| mrs x0, sp_el0
| ldr x0, [x0, #8]
| cbnz x0, <dynamic_cond_resched+0x8>
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret

... compared to the regular form of the function:

| <__cond_resched>:
| bti c
| mrs x0, sp_el0
| ldr x1, [x0, #8]
| cbz x1, <__cond_resched+0x18>
| mov w0, #0x0
| ret
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret

Any architecture which implements static keys should be able to use this to implement PREEMPT_DYNAMIC with similar cost to non-inlined static calls. Since this is likely to have greater overhead than (inlined) static calls, PREEMPT_DYNAMIC is only defaulted to enabled when HAVE_PREEMPT_DYNAMIC_CALL is selected. Signed-off-by: Mark Rutland Signed-off-by: Peter Zijlstra (Intel) Acked-by: Ard Biesheuvel Acked-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20220214165216.2231574-6-mark.rutland@arm.com --- arch/Kconfig | 36 ++++++++++++++++++++++++++--- arch/x86/Kconfig | 2 +- include/linux/entry-common.h | 10 ++++++-- include/linux/kernel.h | 7 +++++- include/linux/sched.h | 10 +++++++- kernel/Kconfig.preempt | 3 ++- kernel/entry/common.c | 11 +++++++++ kernel/sched/core.c | 54 ++++++++++++++++++++++++++++++++++++++++++-- 8 files changed, 122 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 601691f1570f..d544abd14c01 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1278,11 +1278,41 @@ config HAVE_STATIC_CALL_INLINE config HAVE_PREEMPT_DYNAMIC bool + +config HAVE_PREEMPT_DYNAMIC_CALL + bool depends on HAVE_STATIC_CALL + select HAVE_PREEMPT_DYNAMIC + help + An architecture should select this if it can handle the preemption + model being selected at boot time using static calls. + + Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a + preemption function will be patched directly. + + Where an architecture does not select HAVE_STATIC_CALL_INLINE, any + call to a preemption function will go through a trampoline, and the + trampoline will be patched. + + It is strongly advised to support inline static call to avoid any + overhead. + +config HAVE_PREEMPT_DYNAMIC_KEY + bool + depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO + select HAVE_PREEMPT_DYNAMIC help - Select this if the architecture support boot time preempt setting - on top of static calls. It is strongly advised to support inline - static call to avoid any overhead. + An architecture should select this if it can handle the preemption + model being selected at boot time using static keys. + + Each preemption function will be given an early return based on a + static key. This should have slightly lower overhead than non-inline + static calls, as this effectively inlines each trampoline into the + start of its callee. This may avoid redundant work, and may + integrate better with CFI schemes. + + This will have greater overhead than using inline static calls as + the call to the preemption function cannot be entirely elided.
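As an editorial aside on the jump-label API the new code relies on: the read-side predicates are static_branch_likely()/static_branch_unlikely(), toggled at runtime with static_branch_enable()/static_branch_disable(). A minimal in-kernel usage sketch (sk_example, example(), and example_off() are invented names for illustration only):

/* Illustrative jump-label pattern; not part of this patch. */
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_TRUE(sk_example);

void example(void)
{
	if (!static_branch_unlikely(&sk_example))
		return;	/* key disabled: take the patched early return */
	/* ... optional work, skipped once the key is disabled ... */
}

void example_off(void)
{
	static_branch_disable(&sk_example);	/* flip the branch at runtime */
}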
config ARCH_WANT_LD_ORPHAN_WARN bool diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ebe8fc76949a..f13cfdfb30ce 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -245,7 +245,7 @@ config X86 select HAVE_STACK_VALIDATION if X86_64 select HAVE_STATIC_CALL select HAVE_STATIC_CALL_INLINE if HAVE_STACK_VALIDATION - select HAVE_PREEMPT_DYNAMIC + select HAVE_PREEMPT_DYNAMIC_CALL select HAVE_RSEQ select HAVE_SYSCALL_TRACEPOINTS select HAVE_UNSTABLE_SCHED_CLOCK diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index dfd84c59b144..141952f4fee8 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -456,13 +456,19 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs); */ void raw_irqentry_exit_cond_resched(void); #ifdef CONFIG_PREEMPT_DYNAMIC +#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) #define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched #define irqentry_exit_cond_resched_dynamic_disabled NULL DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched); #define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)() -#else -#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched() +#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); +void dynamic_irqentry_exit_cond_resched(void); +#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched() #endif +#else /* CONFIG_PREEMPT_DYNAMIC */ +#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched() +#endif /* CONFIG_PREEMPT_DYNAMIC */ /** * irqentry_exit - Handle return from exception that used irqentry_enter() diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 33f47a996513..a890428bcc1a 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -99,7 +99,7 @@ struct user; extern int __cond_resched(void); # define might_resched() __cond_resched() -#elif defined(CONFIG_PREEMPT_DYNAMIC) +#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) extern int __cond_resched(void); @@ -110,6 +110,11 @@ static __always_inline void might_resched(void) static_call_mod(might_resched)(); } +#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) + +extern int dynamic_might_resched(void); +# define might_resched() dynamic_might_resched() + #else # define might_resched() do { } while (0) diff --git a/include/linux/sched.h b/include/linux/sched.h index 508b91d57470..de03ddeb064b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2020,7 +2020,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) extern int __cond_resched(void); -#ifdef CONFIG_PREEMPT_DYNAMIC +#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) DECLARE_STATIC_CALL(cond_resched, __cond_resched); @@ -2029,6 +2029,14 @@ static __always_inline int _cond_resched(void) return static_call_mod(cond_resched)(); } +#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +extern int dynamic_cond_resched(void); + +static __always_inline int _cond_resched(void) +{ + return dynamic_cond_resched(); +} + #else static inline int _cond_resched(void) diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index ce77f0265660..c2f1fd95a821 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -96,8 +96,9 @@ config PREEMPTION config PREEMPT_DYNAMIC bool 
"Preemption behaviour defined on boot" depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT + select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY select PREEMPT_BUILD - default y + default y if HAVE_PREEMPT_DYNAMIC_CALL help This option allows to define the preemption model on the kernel command line parameter and thus override the default preemption diff --git a/kernel/entry/common.c b/kernel/entry/common.c index 1739ca79613b..b145249ad91a 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -392,7 +393,17 @@ void raw_irqentry_exit_cond_resched(void) } } #ifdef CONFIG_PREEMPT_DYNAMIC +#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched); +#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); +void dynamic_irqentry_exit_cond_resched(void) +{ + if (!static_key_unlikely(&sk_dynamic_irqentry_exit_cond_resched)) + return; + raw_irqentry_exit_cond_resched(); +} +#endif #endif noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 300c0454a2b8..9e65028189f4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -6484,21 +6485,31 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) */ if (likely(!preemptible())) return; - preempt_schedule_common(); } NOKPROBE_SYMBOL(preempt_schedule); EXPORT_SYMBOL(preempt_schedule); #ifdef CONFIG_PREEMPT_DYNAMIC +#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) #ifndef preempt_schedule_dynamic_enabled #define preempt_schedule_dynamic_enabled preempt_schedule #define preempt_schedule_dynamic_disabled NULL #endif DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); EXPORT_STATIC_CALL_TRAMP(preempt_schedule); +#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule); +void __sched notrace dynamic_preempt_schedule(void) +{ + if (!static_branch_unlikely(&sk_dynamic_preempt_schedule)) + return; + preempt_schedule(); +} +NOKPROBE_SYMBOL(dynamic_preempt_schedule); +EXPORT_SYMBOL(dynamic_preempt_schedule); +#endif #endif - /** * preempt_schedule_notrace - preempt_schedule called by tracing @@ -6553,12 +6564,24 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) EXPORT_SYMBOL_GPL(preempt_schedule_notrace); #ifdef CONFIG_PREEMPT_DYNAMIC +#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) #ifndef preempt_schedule_notrace_dynamic_enabled #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace #define preempt_schedule_notrace_dynamic_disabled NULL #endif DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled); EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace); +#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace); +void __sched notrace dynamic_preempt_schedule_notrace(void) +{ + if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace)) + return; + preempt_schedule_notrace(); +} +NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace); +EXPORT_SYMBOL(dynamic_preempt_schedule_notrace); +#endif #endif #endif /* CONFIG_PREEMPTION */ @@ -8068,6 +8091,7 @@ EXPORT_SYMBOL(__cond_resched); #endif #ifdef CONFIG_PREEMPT_DYNAMIC +#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) #define cond_resched_dynamic_enabled 
__cond_resched #define cond_resched_dynamic_disabled ((void *)&__static_call_return0) DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); @@ -8077,6 +8101,25 @@ EXPORT_STATIC_CALL_TRAMP(cond_resched); #define might_resched_dynamic_disabled ((void *)&__static_call_return0) DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); EXPORT_STATIC_CALL_TRAMP(might_resched); +#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); +int __sched dynamic_cond_resched(void) +{ + if (!static_branch_unlikely(&sk_dynamic_cond_resched)) + return 0; + return __cond_resched(); +} +EXPORT_SYMBOL(dynamic_cond_resched); + +static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched); +int __sched dynamic_might_resched(void) +{ + if (!static_branch_unlikely(&sk_dynamic_might_resched)) + return 0; + return __cond_resched(); +} +EXPORT_SYMBOL(dynamic_might_resched); +#endif #endif /* @@ -8206,8 +8249,15 @@ int sched_dynamic_mode(const char *str) return -EINVAL; } +#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) +#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +#define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key) +#define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key) +#else +#error "Unsupported PREEMPT_DYNAMIC mechanism" +#endif void sched_dynamic_update(int mode) { -- cgit v1.2.3-71-gd317 From 64b4a0f8b51b20e0c9dbff7748365994364d5f01 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Sat, 19 Feb 2022 11:47:22 +0000 Subject: net: phylink: remove phylink_config's pcs_poll phylink_config's pcs_poll is no longer used, let's get rid of it. Signed-off-by: Russell King (Oracle) Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 3 +-- include/linux/phylink.h | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 5b53a3e23c89..26f1219a005f 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1300,7 +1300,7 @@ void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs) if (!pl->phylink_disable_state && pl->cfg_link_an_mode == MLO_AN_INBAND) { - if (pl->config->pcs_poll || pcs->poll) + if (pcs->poll) mod_timer(&pl->link_poll, jiffies + HZ); else del_timer(&pl->link_poll); @@ -1673,7 +1673,6 @@ void phylink_start(struct phylink *pl) poll |= pl->config->poll_fixed_state; break; case MLO_AN_INBAND: - poll |= pl->config->pcs_poll; if (pl->pcs) poll |= pl->pcs->poll; break; diff --git a/include/linux/phylink.h b/include/linux/phylink.h index cca149f78d35..9ef9b7047f19 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -86,7 +86,6 @@ enum phylink_op_type { * @type: operation type of PHYLINK instance * @legacy_pre_march2020: driver has not been updated for March 2020 updates * (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls") - * @pcs_poll: MAC PCS cannot provide link change interrupt * @poll_fixed_state: if true, starts link_poll, * if MAC link is at %MLO_AN_FIXED mode. 
* @ovr_an_inband: if true, override PCS to MLO_AN_INBAND @@ -100,7 +99,6 @@ struct phylink_config { struct device *dev; enum phylink_op_type type; bool legacy_pre_march2020; - bool pcs_poll; bool poll_fixed_state; bool ovr_an_inband; void (*get_fixed_state)(struct phylink_config *config, -- cgit v1.2.3-71-gd317 From efcef265fd83d9a68a68926abecb3e1dd3e260a8 Mon Sep 17 00:00:00 2001 From: Sergey Shtylyov Date: Tue, 15 Feb 2022 21:49:26 +0300 Subject: ata: add/use ata_taskfile::{error|status} fields Add the explicit error and status register fields to 'struct ata_taskfile' using the anonymous *union*s ('struct ide_taskfile' had that for ages!) and update the libata taskfile code accordingly. There should be no object code changes resulting from that... Signed-off-by: Sergey Shtylyov Signed-off-by: Damien Le Moal --- drivers/ata/acard-ahci.c | 2 +- drivers/ata/ahci.c | 4 ++-- drivers/ata/ahci_qoriq.c | 2 +- drivers/ata/ahci_xgene.c | 2 +- drivers/ata/libahci.c | 4 ++-- drivers/ata/libata-acpi.c | 8 ++++---- drivers/ata/libata-core.c | 12 ++++++------ drivers/ata/libata-eh.c | 42 +++++++++++++++++++++--------------------- drivers/ata/libata-sata.c | 10 +++++----- drivers/ata/libata-scsi.c | 22 +++++++++++----------- drivers/ata/libata-sff.c | 6 +++--- drivers/ata/pata_ep93xx.c | 4 ++-- drivers/ata/pata_ns87415.c | 4 ++-- drivers/ata/pata_octeon_cf.c | 4 ++-- drivers/ata/pata_samsung_cf.c | 2 +- drivers/ata/sata_highbank.c | 2 +- drivers/ata/sata_inic162x.c | 10 +++++----- drivers/ata/sata_rcar.c | 4 ++-- drivers/ata/sata_svw.c | 10 +++++----- drivers/ata/sata_vsc.c | 10 +++++----- include/linux/libata.h | 10 ++++++++-- 21 files changed, 90 insertions(+), 84 deletions(-) (limited to 'include/linux') diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 536d4cb8f08b..7654a40c12b4 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -265,7 +265,7 @@ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && !(qc->flags & ATA_QCFLAG_FAILED)) { ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); - qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; + qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15]; } else ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 51045fd05243..bb14683a8d4b 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -739,7 +739,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), @@ -808,7 +808,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, timing, deadline, &online, diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index bf5b388bd4e0..cca78e25b173 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -123,7 +123,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; 
ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, timing, deadline, &online, diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index 8e206379d699..c5b392e07e65 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -365,7 +365,7 @@ static int xgene_ahci_do_hardreset(struct ata_link *link, do { /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, timing, deadline, online, ahci_check_ready); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 0ed484e04fd6..cf8c7fd59ada 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1561,7 +1561,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); rc = sata_link_hardreset(link, timing, deadline, online, @@ -2033,7 +2033,7 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && !(qc->flags & ATA_QCFLAG_FAILED)) { ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); - qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; + qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15]; } else ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index b788f5a4565c..3d345d173556 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -546,13 +546,13 @@ static void ata_acpi_gtf_to_tf(struct ata_device *dev, tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf->protocol = ATA_PROT_NODATA; - tf->feature = gtf->tf[0]; /* 0x1f1 */ + tf->error = gtf->tf[0]; /* 0x1f1 */ tf->nsect = gtf->tf[1]; /* 0x1f2 */ tf->lbal = gtf->tf[2]; /* 0x1f3 */ tf->lbam = gtf->tf[3]; /* 0x1f4 */ tf->lbah = gtf->tf[4]; /* 0x1f5 */ tf->device = gtf->tf[5]; /* 0x1f6 */ - tf->command = gtf->tf[6]; /* 0x1f7 */ + tf->status = gtf->tf[6]; /* 0x1f7 */ } static int ata_acpi_filter_tf(struct ata_device *dev, @@ -679,7 +679,7 @@ static int ata_acpi_run_tf(struct ata_device *dev, "(%s) rejected by device (Stat=0x%02x Err=0x%02x)", tf.command, tf.feature, tf.nsect, tf.lbal, tf.lbam, tf.lbah, tf.device, descr, - rtf.command, rtf.feature); + rtf.status, rtf.error); rc = 0; break; @@ -689,7 +689,7 @@ static int ata_acpi_run_tf(struct ata_device *dev, "(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", tf.command, tf.feature, tf.nsect, tf.lbal, tf.lbam, tf.lbah, tf.device, descr, - err_mask, rtf.command, rtf.feature); + err_mask, rtf.status, rtf.error); rc = -EIO; break; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 0ddb49f2ece1..15172415b87e 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1171,7 +1171,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) ata_dev_warn(dev, "failed to read native max address (err_mask=0x%x)\n", err_mask); - if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) + if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED)) return -EACCES; return -EIO; } @@ -1235,7 +1235,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) "failed to set max address (err_mask=0x%x)\n", err_mask); if (err_mask == AC_ERR_DEV && - (tf.feature & (ATA_ABORTED | ATA_IDNF))) + (tf.error & (ATA_ABORTED | 
ATA_IDNF))) return -EACCES; return -EIO; } @@ -1584,7 +1584,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, /* perform minimal error analysis */ if (qc->flags & ATA_QCFLAG_FAILED) { - if (qc->result_tf.command & (ATA_ERR | ATA_DF)) + if (qc->result_tf.status & (ATA_ERR | ATA_DF)) qc->err_mask |= AC_ERR_DEV; if (!qc->err_mask) @@ -1593,7 +1593,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, if (qc->err_mask & ~AC_ERR_OTHER) qc->err_mask &= ~AC_ERR_OTHER; } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) { - qc->result_tf.command |= ATA_SENSE; + qc->result_tf.status |= ATA_SENSE; } /* finish up */ @@ -1813,7 +1813,7 @@ retry: return 0; } - if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { + if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) { /* Device or controller might have reported * the wrong device class. Give a shot at the * other IDENTIFY if the current one is @@ -4375,7 +4375,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev, /* A clean abort indicates an original or just out of spec drive and we should continue as we issue the setup based on the drive reported working geometry */ - if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) + if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED)) err_mask = 0; return err_mask; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index e31341c9ac61..3307ed45fe4d 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1386,7 +1386,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); if (err_mask == AC_ERR_DEV) - *r_sense_key = tf.feature >> 4; + *r_sense_key = tf.error >> 4; return err_mask; } @@ -1429,12 +1429,12 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc, err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); /* Ignore err_mask; ATA_ERR might be set */ - if (tf.command & ATA_SENSE) { + if (tf.status & ATA_SENSE) { ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal); qc->flags |= ATA_QCFLAG_SENSE_VALID; } else { ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", - tf.command, err_mask); + tf.status, err_mask); } } @@ -1557,7 +1557,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, const struct ata_taskfile *tf) { unsigned int tmp, action = 0; - u8 stat = tf->command, err = tf->feature; + u8 stat = tf->status, err = tf->error; if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { qc->err_mask |= AC_ERR_HSM; @@ -1594,7 +1594,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { tmp = atapi_eh_request_sense(qc->dev, qc->scsicmd->sense_buffer, - qc->result_tf.feature >> 4); + qc->result_tf.error >> 4); if (!tmp) qc->flags |= ATA_QCFLAG_SENSE_VALID; else @@ -2360,7 +2360,7 @@ static void ata_eh_link_report(struct ata_link *link) cmd->hob_feature, cmd->hob_nsect, cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, cmd->device, qc->tag, data_buf, cdb_buf, - res->command, res->feature, res->nsect, + res->status, res->error, res->nsect, res->lbal, res->lbam, res->lbah, res->hob_feature, res->hob_nsect, res->hob_lbal, res->hob_lbam, res->hob_lbah, @@ -2368,28 +2368,28 @@ static void ata_eh_link_report(struct ata_link *link) qc->err_mask & AC_ERR_NCQ ? 
" " : ""); #ifdef CONFIG_ATA_VERBOSE_ERROR - if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | - ATA_SENSE | ATA_ERR)) { - if (res->command & ATA_BUSY) + if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | + ATA_SENSE | ATA_ERR)) { + if (res->status & ATA_BUSY) ata_dev_err(qc->dev, "status: { Busy }\n"); else ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", - res->command & ATA_DRDY ? "DRDY " : "", - res->command & ATA_DF ? "DF " : "", - res->command & ATA_DRQ ? "DRQ " : "", - res->command & ATA_SENSE ? "SENSE " : "", - res->command & ATA_ERR ? "ERR " : ""); + res->status & ATA_DRDY ? "DRDY " : "", + res->status & ATA_DF ? "DF " : "", + res->status & ATA_DRQ ? "DRQ " : "", + res->status & ATA_SENSE ? "SENSE " : "", + res->status & ATA_ERR ? "ERR " : ""); } if (cmd->command != ATA_CMD_PACKET && - (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | - ATA_IDNF | ATA_ABORTED))) + (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | + ATA_ABORTED))) ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", - res->feature & ATA_ICRC ? "ICRC " : "", - res->feature & ATA_UNC ? "UNC " : "", - res->feature & ATA_AMNF ? "AMNF " : "", - res->feature & ATA_IDNF ? "IDNF " : "", - res->feature & ATA_ABORTED ? "ABRT " : ""); + res->error & ATA_ICRC ? "ICRC " : "", + res->error & ATA_UNC ? "UNC " : "", + res->error & ATA_AMNF ? "AMNF " : "", + res->error & ATA_IDNF ? "IDNF " : "", + res->error & ATA_ABORTED ? "ABRT " : ""); #endif } } diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index 071158c0c44c..044a16daa2d4 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -191,8 +191,8 @@ EXPORT_SYMBOL_GPL(ata_tf_to_fis); void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) { - tf->command = fis[2]; /* status */ - tf->feature = fis[3]; /* error */ + tf->status = fis[2]; + tf->error = fis[3]; tf->lbal = fis[4]; tf->lbam = fis[5]; @@ -1406,8 +1406,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, *tag = buf[0] & 0x1f; - tf->command = buf[2]; - tf->feature = buf[3]; + tf->status = buf[2]; + tf->error = buf[3]; tf->lbal = buf[4]; tf->lbam = buf[5]; tf->lbah = buf[6]; @@ -1482,7 +1482,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; if (dev->class == ATA_DEV_ZAC && - ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) { + ((qc->result_tf.status & ATA_SENSE) || qc->result_tf.auxiliary)) { char sense_key, asc, ascq; sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 1b6f1a0ecebf..9df1a20b77dd 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -680,7 +680,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) */ static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf) { - u8 stat = tf->command, err = tf->feature; + u8 stat = tf->status, err = tf->error; if (stat & ATA_BUSY) { ata_port_warn(ap, "status=0x%02x {Busy} ", stat); @@ -871,8 +871,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) * onto sense key, asc & ascq. 
*/ if (qc->err_mask || - tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { - ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, + tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { + ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, &sense_key, &asc, &ascq, verbose); ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); } else { @@ -901,13 +901,13 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) * Copy registers into sense buffer. */ desc[2] = 0x00; - desc[3] = tf->feature; /* == error reg */ + desc[3] = tf->error; desc[5] = tf->nsect; desc[7] = tf->lbal; desc[9] = tf->lbam; desc[11] = tf->lbah; desc[12] = tf->device; - desc[13] = tf->command; /* == status reg */ + desc[13] = tf->status; /* * Fill in Extend bit, and the high order bytes @@ -922,8 +922,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) } } else { /* Fixed sense format */ - desc[0] = tf->feature; - desc[1] = tf->command; /* status */ + desc[0] = tf->error; + desc[1] = tf->status; desc[2] = tf->device; desc[3] = tf->nsect; desc[7] = 0; @@ -972,14 +972,14 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) * onto sense key, asc & ascq. */ if (qc->err_mask || - tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { - ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, + tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { + ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, &sense_key, &asc, &ascq, verbose); ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq); } else { /* Could not decode error */ ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n", - tf->command, qc->err_mask); + tf->status, qc->err_mask); ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); return; } @@ -2473,7 +2473,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) /* fill these in, for the case where they are -not- overwritten */ cmd->sense_buffer[0] = 0x70; - cmd->sense_buffer[2] = qc->tf.feature >> 4; + cmd->sense_buffer[2] = qc->tf.error >> 4; ata_qc_reinit(qc); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 2fb3956f66f6..1d28b1cd8baa 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -449,8 +449,8 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - tf->command = ata_sff_check_status(ap); - tf->feature = ioread8(ioaddr->error_addr); + tf->status = ata_sff_check_status(ap); + tf->error = ioread8(ioaddr->error_addr); tf->nsect = ioread8(ioaddr->nsect_addr); tf->lbal = ioread8(ioaddr->lbal_addr); tf->lbam = ioread8(ioaddr->lbam_addr); @@ -1825,7 +1825,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, memset(&tf, 0, sizeof(tf)); ap->ops->sff_tf_read(ap, &tf); - err = tf.feature; + err = tf.error; if (r_err) *r_err = err; diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index b78f71c70f27..6c75a22db12b 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -416,8 +416,8 @@ static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ep93xx_pata_data *drv_data = ap->host->private_data; - tf->command = ep93xx_pata_check_status(ap); - tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE); + tf->status = ep93xx_pata_check_status(ap); + tf->error = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE); tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT); tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL); 
tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM); diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index f4949e704356..9dd6bffefb48 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c @@ -264,8 +264,8 @@ void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - tf->command = ns87560_check_status(ap); - tf->feature = ioread8(ioaddr->error_addr); + tf->status = ns87560_check_status(ap); + tf->error = ioread8(ioaddr->error_addr); tf->nsect = ioread8(ioaddr->nsect_addr); tf->lbal = ioread8(ioaddr->lbal_addr); tf->lbam = ioread8(ioaddr->lbam_addr); diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 05c2ab375756..aaa9f95d814c 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -382,7 +382,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) void __iomem *base = ap->ioaddr.data_addr; blob = __raw_readw(base + 0xc); - tf->feature = blob >> 8; + tf->error = blob >> 8; blob = __raw_readw(base + 2); tf->nsect = blob & 0xff; @@ -394,7 +394,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) blob = __raw_readw(base + 6); tf->device = blob & 0xff; - tf->command = blob >> 8; + tf->status = blob >> 8; if (tf->flags & ATA_TFLAG_LBA48) { if (likely(ap->ioaddr.ctl_addr)) { diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index bdd2178c4a61..aba1536ddd44 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c @@ -213,7 +213,7 @@ static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - tf->feature = ata_inb(ap->host, ioaddr->error_addr); + tf->error = ata_inb(ap->host, ioaddr->error_addr); tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr); tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr); tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr); diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index 077bce76c445..40bb27f83ecf 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -400,7 +400,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; + tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); do { diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 781901151d82..11e518f0111c 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -557,13 +557,13 @@ static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { void __iomem *port_base = inic_port_base(ap); - tf->feature = readb(port_base + PORT_TF_FEATURE); + tf->error = readb(port_base + PORT_TF_FEATURE); tf->nsect = readb(port_base + PORT_TF_NSECT); tf->lbal = readb(port_base + PORT_TF_LBAL); tf->lbam = readb(port_base + PORT_TF_LBAM); tf->lbah = readb(port_base + PORT_TF_LBAH); tf->device = readb(port_base + PORT_TF_DEVICE); - tf->command = readb(port_base + PORT_TF_COMMAND); + tf->status = readb(port_base + PORT_TF_COMMAND); } static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) @@ -580,11 +580,11 @@ static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) */ inic_tf_read(qc->ap, &tf); - if (!(tf.command & ATA_ERR)) + if (!(tf.status & ATA_ERR)) return false; - rtf->command = tf.command; - rtf->feature = tf.feature; + rtf->status = tf.status; + rtf->error = tf.error; return true; 
} diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index bde76a87beeb..8e4516acb3f6 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -394,8 +394,8 @@ static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - tf->command = sata_rcar_check_status(ap); - tf->feature = ioread32(ioaddr->error_addr); + tf->status = sata_rcar_check_status(ap); + tf->error = ioread32(ioaddr->error_addr); tf->nsect = ioread32(ioaddr->nsect_addr); tf->lbal = ioread32(ioaddr->lbal_addr); tf->lbam = ioread32(ioaddr->lbam_addr); diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index f8552559db7f..2e3418a82b44 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -194,24 +194,24 @@ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - u16 nsect, lbal, lbam, lbah, feature; + u16 nsect, lbal, lbam, lbah, error; - tf->command = k2_stat_check_status(ap); + tf->status = k2_stat_check_status(ap); tf->device = readw(ioaddr->device_addr); - feature = readw(ioaddr->error_addr); + error = readw(ioaddr->error_addr); nsect = readw(ioaddr->nsect_addr); lbal = readw(ioaddr->lbal_addr); lbam = readw(ioaddr->lbam_addr); lbah = readw(ioaddr->lbah_addr); - tf->feature = feature; + tf->error = error; tf->nsect = nsect; tf->lbal = lbal; tf->lbam = lbam; tf->lbah = lbah; if (tf->flags & ATA_TFLAG_LBA48) { - tf->hob_feature = feature >> 8; + tf->hob_feature = error >> 8; tf->hob_nsect = nsect >> 8; tf->hob_lbal = lbal >> 8; tf->hob_lbam = lbam >> 8; diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 8fa952cb9f7f..87e4ed66b306 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c @@ -183,24 +183,24 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; - u16 nsect, lbal, lbam, lbah, feature; + u16 nsect, lbal, lbam, lbah, error; - tf->command = ata_sff_check_status(ap); + tf->status = ata_sff_check_status(ap); tf->device = readw(ioaddr->device_addr); - feature = readw(ioaddr->error_addr); + error = readw(ioaddr->error_addr); nsect = readw(ioaddr->nsect_addr); lbal = readw(ioaddr->lbal_addr); lbam = readw(ioaddr->lbam_addr); lbah = readw(ioaddr->lbah_addr); - tf->feature = feature; + tf->error = error; tf->nsect = nsect; tf->lbal = lbal; tf->lbam = lbam; tf->lbah = lbah; if (tf->flags & ATA_TFLAG_LBA48) { - tf->hob_feature = feature >> 8; + tf->hob_feature = error >> 8; tf->hob_nsect = nsect >> 8; tf->hob_lbal = lbal >> 8; tf->hob_lbam = lbam >> 8; diff --git a/include/linux/libata.h b/include/linux/libata.h index a49e75c4206d..0619ae462ecd 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -518,7 +518,10 @@ struct ata_taskfile { u8 hob_lbam; u8 hob_lbah; - u8 feature; + union { + u8 error; + u8 feature; + }; u8 nsect; u8 lbal; u8 lbam; @@ -526,7 +529,10 @@ struct ata_taskfile { u8 device; - u8 command; /* IO operation */ + union { + u8 status; + u8 command; + }; u32 auxiliary; /* auxiliary field */ /* from SATA 3.1 and */ -- cgit v1.2.3-71-gd317 From cccd73d607fee52f35b4b030408fa5f6c21ef503 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 16 Feb 2022 09:41:32 -0800 Subject: iosys-map: Add offset to iosys_map_memcpy_to() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit In certain situations it's useful to be able to write to an offset of the mapping. Add a dst_offset to iosys_map_memcpy_to(). Cc: Sumit Semwal Cc: Christian König Cc: Thomas Zimmermann Cc: dri-devel@lists.freedesktop.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Lucas De Marchi Reviewed-by: Christian König Reviewed-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20220216174147.3073235-2-lucas.demarchi@intel.com --- drivers/gpu/drm/drm_cache.c | 2 +- drivers/gpu/drm/drm_fb_helper.c | 2 +- include/linux/iosys-map.h | 17 +++++++++-------- 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 4bb093ccf1b8..4b0da6baff78 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -221,7 +221,7 @@ static void memcpy_fallback(struct iosys_map *dst, if (!dst->is_iomem && !src->is_iomem) { memcpy(dst->vaddr, src->vaddr, len); } else if (!src->is_iomem) { - iosys_map_memcpy_to(dst, src->vaddr, len); + iosys_map_memcpy_to(dst, 0, src->vaddr, len); } else if (!dst->is_iomem) { memcpy_fromio(dst->vaddr, src->vaddr_iomem, len); } else { diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e9a9d35fbf5e..6f72627369f8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -385,7 +385,7 @@ static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ for (y = clip->y1; y < clip->y2; y++) { - iosys_map_memcpy_to(dst, src, len); + iosys_map_memcpy_to(dst, 0, src, len); iosys_map_incr(dst, fb->pitches[0]); src += fb->pitches[0]; } diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h index f4186f91caa6..edd730b1e899 100644 --- a/include/linux/iosys-map.h +++ b/include/linux/iosys-map.h @@ -220,22 +220,23 @@ static inline void iosys_map_clear(struct iosys_map *map) } /** - * iosys_map_memcpy_to - Memcpy into iosys mapping + * iosys_map_memcpy_to - Memcpy into offset of iosys_map * @dst: The iosys_map structure + * @dst_offset: The offset within dst at which to copy * @src: The source buffer * @len: The number of byte in src * - * Copies data into a iosys mapping. The source buffer is in system - * memory. Depending on the buffer's location, the helper picks the correct - * method of accessing the memory. + * Copies data into an iosys_map at an offset. The source buffer is in + * system memory. Depending on the buffer's location, the helper picks the + * correct method of accessing the memory. */ -static inline void iosys_map_memcpy_to(struct iosys_map *dst, const void *src, - size_t len) +static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset, + const void *src, size_t len) { if (dst->is_iomem) - memcpy_toio(dst->vaddr_iomem, src, len); + memcpy_toio(dst->vaddr_iomem + dst_offset, src, len); else - memcpy(dst->vaddr, src, len); + memcpy(dst->vaddr + dst_offset, src, len); } /** -- cgit v1.2.3-71-gd317 From e62f25e8b3cdd29224c27938addba817aedd4b54 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 16 Feb 2022 09:41:33 -0800 Subject: iosys-map: Add a few more helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit First the simplest ones: - iosys_map_memset(): when abstracting system and I/O memory, just like the memcpy() use case, memset() also has dedicated functions to be called for using IO memory.
- iosys_map_memcpy_from(): we may need to copy data from I/O memory, not only to. In certain situations it's useful to be able to read or write to an offset that is calculated by having the memory layout given by a struct declaration. Usually we are going to read/write a u8, u16, u32 or u64. As a pre-requisite for the implementation, add iosys_map_memcpy_from() to be the equivalent of iosys_map_memcpy_to(), but in the other direction. Then add 2 pairs of macros: - iosys_map_rd() / iosys_map_wr() - iosys_map_rd_field() / iosys_map_wr_field() The first pair takes the C-type and offset to read/write. The second pair uses a struct describing the layout of the mapping in order to calculate the offset and size being read/written. We could use readb, readw, readl, readq and the write* counterparts, however due to alignment issues this may not work on all architectures. If alignment needs to be checked to call the right function, it's not possible to decide at compile-time which function to call: so just leave the decision to the memcpy function that will do exactly that. Finally, in order to use the above macros with a map derived from another, add another initializer: IOSYS_MAP_INIT_OFFSET(). v2: - Rework IOSYS_MAP_INIT_OFFSET() so it doesn't rely on aliasing rules within the union - Add offset to both iosys_map_rd_field() and iosys_map_wr_field() to allow the struct itself to be at an offset from the mapping - Add documentation to iosys_map_rd_field() with example and expected memory layout v3: - Drop kernel.h include as it's not needed anymore Cc: Sumit Semwal Cc: Christian König Cc: Thomas Zimmermann Cc: Mauro Carvalho Chehab Cc: dri-devel@lists.freedesktop.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Lucas De Marchi Reviewed-by: Mauro Carvalho Chehab Reviewed-by: Matt Atwood Reviewed-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20220216174147.3073235-3-lucas.demarchi@intel.com --- include/linux/iosys-map.h | 201 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 201 insertions(+) (limited to 'include/linux') diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h index edd730b1e899..e69a002d5aa4 100644 --- a/include/linux/iosys-map.h +++ b/include/linux/iosys-map.h @@ -120,6 +120,45 @@ struct iosys_map { .is_iomem = false, \ } +/** + * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map + * @map_: The iosys_map structure to copy from + * @offset_: Offset to add to the other mapping + * + * Initializes a new iosys_map struct based on another passed as argument. It + * does a shallow copy of the struct so it's possible to update the back storage + * without changing where the original map points to. It is the equivalent of + * doing: + * + * .. code-block:: c + * + * struct iosys_map map = other_map; + * iosys_map_incr(&map, offset); + * + * Example usage: + * + * .. code-block:: c + * + * void foo(struct device *dev, struct iosys_map *base_map) + * { + * ... + * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET); + * ... + * } + * + * The advantage of using the initializer over just increasing the offset with + * iosys_map_incr() like above is that the new map will always point to the + * right place of the buffer during its scope. It reduces the risk of updating + * the wrong part of the buffer and having no compiler warning about that. If + * the assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can warn + * about the use of an uninitialized variable.
+ */ +#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \ + struct iosys_map copy = *map_; \ + iosys_map_incr(&copy, offset_); \ + copy; \ +}) + /** * iosys_map_set_vaddr - Sets a iosys mapping structure to an address in system memory * @map: The iosys_map structure @@ -239,6 +278,26 @@ static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset, memcpy(dst->vaddr + dst_offset, src, len); } +/** + * iosys_map_memcpy_from - Memcpy from iosys_map into system memory + * @dst: Destination in system memory + * @src: The iosys_map structure + * @src_offset: The offset from which to copy + * @len: The number of bytes in src + * + * Copies data from an iosys_map at an offset. The destination buffer is in + * system memory. Depending on the mapping location, the helper picks the + * correct method of accessing the memory. + */ +static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src, + size_t src_offset, size_t len) +{ + if (src->is_iomem) + memcpy_fromio(dst, src->vaddr_iomem + src_offset, len); + else + memcpy(dst, src->vaddr + src_offset, len); +} + /** * iosys_map_incr - Increments the address stored in a iosys mapping * @map: The iosys_map structure @@ -255,4 +314,146 @@ static inline void iosys_map_incr(struct iosys_map *map, size_t incr) map->vaddr += incr; } +/** + * iosys_map_memset - Memset iosys_map + * @dst: The iosys_map structure + * @offset: Offset from dst where to start setting value + * @value: The value to set + * @len: The number of bytes to set in dst + * + * Set value in iosys_map. Depending on the buffer's location, the helper + * picks the correct method of accessing the memory. + */ +static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, + int value, size_t len) +{ + if (dst->is_iomem) + memset_io(dst->vaddr_iomem + offset, value, len); + else + memset(dst->vaddr + offset, value, len); +} + +/** + * iosys_map_rd - Read a C-type value from the iosys_map + * + * @map__: The iosys_map structure + * @offset__: The offset from which to read + * @type__: Type of the value being read + * + * Read a C type value from iosys_map, handling possible un-aligned accesses to + * the mapping. + * + * Returns: + * The value read from the mapping. + */ +#define iosys_map_rd(map__, offset__, type__) ({ \ + type__ val; \ + iosys_map_memcpy_from(&val, map__, offset__, sizeof(val)); \ + val; \ +}) + +/** + * iosys_map_wr - Write a C-type value to the iosys_map + * + * @map__: The iosys_map structure + * @offset__: The offset from the mapping to write to + * @type__: Type of the value being written + * @val__: Value to write + * + * Write a C-type value to the iosys_map, handling possible un-aligned accesses + * to the mapping. + */ +#define iosys_map_wr(map__, offset__, type__, val__) ({ \ + type__ val = (val__); \ + iosys_map_memcpy_to(map__, offset__, &val, sizeof(val)); \ +}) + +/** + * iosys_map_rd_field - Read a member from a struct in the iosys_map + * + * @map__: The iosys_map structure + * @struct_offset__: Offset from the beginning of the map, where the struct + * is located + * @struct_type__: The struct describing the layout of the mapping + * @field__: Member of the struct to read + * + * Read a value from iosys_map considering its layout is described by a C struct + * starting at @struct_offset__. The field offset and size are calculated and its + * value is read, handling possible un-aligned memory accesses.
For example: suppose + * there is a @struct foo defined as below and the value ``foo.field2.inner2`` + * needs to be read from the iosys_map: + * + * .. code-block:: c + * + * struct foo { + * int field1; + * struct { + * int inner1; + * int inner2; + * } field2; + * int field3; + * } __packed; + * + * This is the expected memory layout of a buffer using iosys_map_rd_field(): + * + * +------------------------------+--------------------------+ + * | Address | Content | + * +==============================+==========================+ + * | buffer + 0000 | start of mmapped buffer | + * | | pointed by iosys_map | + * +------------------------------+--------------------------+ + * | ... | ... | + * +------------------------------+--------------------------+ + * | buffer + ``struct_offset__`` | start of ``struct foo`` | + * +------------------------------+--------------------------+ + * | ... | ... | + * +------------------------------+--------------------------+ + * | buffer + wwww | ``foo.field2.inner2`` | + * +------------------------------+--------------------------+ + * | ... | ... | + * +------------------------------+--------------------------+ + * | buffer + yyyy | end of ``struct foo`` | + * +------------------------------+--------------------------+ + * | ... | ... | + * +------------------------------+--------------------------+ + * | buffer + zzzz | end of mmapped buffer | + * +------------------------------+--------------------------+ + * + * Values automatically calculated by this macro or not needed are denoted by + * wwww, yyyy and zzzz. This is the code to read that value: + * + * .. code-block:: c + * + * x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2); + * + * Returns: + * The value read from the mapping. + */ +#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \ + struct_type__ *s; \ + iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \ + typeof(s->field__)); \ +}) + +/** + * iosys_map_wr_field - Write to a member of a struct in the iosys_map + * + * @map__: The iosys_map structure + * @struct_offset__: Offset from the beginning of the map, where the struct + * is located + * @struct_type__: The struct describing the layout of the mapping + * @field__: Member of the struct to write + * @val__: Value to write + * + * Write a value to the iosys_map considering its layout is described by a C struct + * starting at @struct_offset__. The field offset and size are calculated and the + * @val__ is written, handling possible un-aligned memory accesses. Refer to + * iosys_map_rd_field() for expected usage and memory layout. + */ +#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \ + struct_type__ *s; \ + iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \ + typeof(s->field__), val__); \ +}) + #endif /* __IOSYS_MAP_H__ */ -- cgit v1.2.3-71-gd317 From 643b622b51f1f0015e0a80f90b4ef9032e6ddb1b Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sun, 20 Feb 2022 15:06:32 +0800 Subject: net: tcp: add skb drop reasons to tcp_v{4,6}_inbound_md5_hash() Pass the address of the drop reason to tcp_v4_inbound_md5_hash() and tcp_v6_inbound_md5_hash() to store the reason for the skb drop when these functions fail. Therefore, the drop reason can be passed to kfree_skb_reason() when the skb needs to be freed.
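As a condensed, illustrative sketch of the resulting caller pattern (simplified from the tcp_v4_rcv() hunk below; the real code also charges sk->sk_drops and releases the request socket before discarding):

	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

	if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason))) {
		/* the callee stored one of the TCP_MD5* reasons listed next */
		kfree_skb_reason(skb, drop_reason);	/* rather than a bare kfree_skb() */
		return 0;
	}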
Following drop reasons are added: SKB_DROP_REASON_TCP_MD5NOTFOUND SKB_DROP_REASON_TCP_MD5UNEXPECTED SKB_DROP_REASON_TCP_MD5FAILURE SKB_DROP_REASON_TCP_MD5* above correspond to LINUX_MIB_TCPMD5* Reviewed-by: Mengen Sun Reviewed-by: Hao Peng Signed-off-by: Menglong Dong Reviewed-by: Eric Dumazet Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/skbuff.h | 12 ++++++++++++ include/trace/events/skb.h | 4 ++++ net/ipv4/tcp_ipv4.c | 13 +++++++++---- net/ipv6/tcp_ipv6.c | 11 ++++++++--- 4 files changed, 33 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a5adbf6b51e8..46678eb587ff 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -346,6 +346,18 @@ enum skb_drop_reason { * udp packet drop out of * udp_memory_allocated. */ + SKB_DROP_REASON_TCP_MD5NOTFOUND, /* no MD5 hash and one + * expected, corresponding + * to LINUX_MIB_TCPMD5NOTFOUND + */ + SKB_DROP_REASON_TCP_MD5UNEXPECTED, /* MD5 hash and we're not + * expecting one, corresponding + * to LINUX_MIB_TCPMD5UNEXPECTED + */ + SKB_DROP_REASON_TCP_MD5FAILURE, /* MD5 hash and it's wrong, + * corresponding to + * LINUX_MIB_TCPMD5FAILURE + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index cfcfd26399f7..46c06b0be850 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -27,6 +27,10 @@ EM(SKB_DROP_REASON_IP_NOPROTO, IP_NOPROTO) \ EM(SKB_DROP_REASON_SOCKET_RCVBUFF, SOCKET_RCVBUFF) \ EM(SKB_DROP_REASON_PROTO_MEM, PROTO_MEM) \ + EM(SKB_DROP_REASON_TCP_MD5NOTFOUND, TCP_MD5NOTFOUND) \ + EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED, \ + TCP_MD5UNEXPECTED) \ + EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a3beab01e9a7..d3c417119057 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1412,7 +1412,8 @@ EXPORT_SYMBOL(tcp_v4_md5_hash_skb); /* Called with rcu_read_lock() */ static bool tcp_v4_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, - int dif, int sdif) + int dif, int sdif, + enum skb_drop_reason *reason) { #ifdef CONFIG_TCP_MD5SIG /* @@ -1445,11 +1446,13 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk, return false; if (hash_expected && !hash_location) { + *reason = SKB_DROP_REASON_TCP_MD5NOTFOUND; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); return true; } if (!hash_expected && hash_location) { + *reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); return true; } @@ -1462,6 +1465,7 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { + *reason = SKB_DROP_REASON_TCP_MD5FAILURE; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n", &iph->saddr, ntohs(th->source), @@ -1971,13 +1975,13 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, int tcp_v4_rcv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); + enum skb_drop_reason drop_reason; int sdif = inet_sdif(skb); int dif = inet_iif(skb); const struct iphdr *iph; const struct tcphdr *th; bool refcounted; struct sock *sk; - int drop_reason; int ret; drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; @@ -2025,7 +2029,8 @@ process: struct sock *nsk; sk = req->rsk_listener; - if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) { + if
(unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif, + &drop_reason))) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; @@ -2099,7 +2104,7 @@ process: goto discard_and_relse; } - if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif)) + if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason)) goto discard_and_relse; nf_reset_ct(skb); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 0aa17073df1a..1262b790b146 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -775,7 +775,8 @@ clear_hash_noput: static bool tcp_v6_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, - int dif, int sdif) + int dif, int sdif, + enum skb_drop_reason *reason) { #ifdef CONFIG_TCP_MD5SIG const __u8 *hash_location = NULL; @@ -798,11 +799,13 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk, return false; if (hash_expected && !hash_location) { + *reason = SKB_DROP_REASON_TCP_MD5NOTFOUND; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); return true; } if (!hash_expected && hash_location) { + *reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); return true; } @@ -813,6 +816,7 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { + *reason = SKB_DROP_REASON_TCP_MD5FAILURE; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n", genhash ? "failed" : "mismatch", @@ -1681,7 +1685,8 @@ process: struct sock *nsk; sk = req->rsk_listener; - if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) { + if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif, + &drop_reason)) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; @@ -1752,7 +1757,7 @@ process: goto discard_and_relse; } - if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) + if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason)) goto discard_and_relse; if (tcp_filter(sk, skb)) { -- cgit v1.2.3-71-gd317 From 7a26dc9e7b43f5a24c4b843713e728582adf1c38 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sun, 20 Feb 2022 15:06:33 +0800 Subject: net: tcp: add skb drop reasons to tcp_add_backlog() Pass the address of drop_reason to tcp_add_backlog() to store the reasons for skb drops when fails. Following drop reasons are introduced: SKB_DROP_REASON_SOCKET_BACKLOG Reviewed-by: Mengen Sun Reviewed-by: Hao Peng Signed-off-by: Menglong Dong Reviewed-by: Eric Dumazet Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 4 ++++ include/net/tcp.h | 3 ++- include/trace/events/skb.h | 1 + net/ipv4/tcp_ipv4.c | 7 +++++-- net/ipv6/tcp_ipv6.c | 2 +- 5 files changed, 13 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 46678eb587ff..f7f33c79945b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -358,6 +358,10 @@ enum skb_drop_reason { * corresponding to * LINUX_MIB_TCPMD5FAILURE */ + SKB_DROP_REASON_SOCKET_BACKLOG, /* failed to add skb to socket + * backlog (see + * LINUX_MIB_TCPBACKLOGDROP) + */ SKB_DROP_REASON_MAX, }; diff --git a/include/net/tcp.h b/include/net/tcp.h index eff2487d972d..04f4650e0ff0 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1367,7 +1367,8 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb) __skb_checksum_complete(skb); } -bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); +bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, + enum skb_drop_reason *reason); #ifdef CONFIG_INET void __sk_defer_free_flush(struct sock *sk); diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 46c06b0be850..bfccd77e9071 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -31,6 +31,7 @@ EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED, \ TCP_MD5UNEXPECTED) \ EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE) \ + EM(SKB_DROP_REASON_SOCKET_BACKLOG, SOCKET_BACKLOG) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d3c417119057..cbca8637ba2f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1811,7 +1811,8 @@ int tcp_v4_early_demux(struct sk_buff *skb) return 0; } -bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) +bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, + enum skb_drop_reason *reason) { u32 limit, tail_gso_size, tail_gso_segs; struct skb_shared_info *shinfo; @@ -1837,6 +1838,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) if (unlikely(tcp_checksum_complete(skb))) { bh_unlock_sock(sk); trace_tcp_bad_csum(skb); + *reason = SKB_DROP_REASON_TCP_CSUM; __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); return true; @@ -1925,6 +1927,7 @@ no_coalesce: if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); + *reason = SKB_DROP_REASON_SOCKET_BACKLOG; __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP); return true; } @@ -2133,7 +2136,7 @@ process: if (!sock_owned_by_user(sk)) { ret = tcp_v4_do_rcv(sk, skb); } else { - if (tcp_add_backlog(sk, skb)) + if (tcp_add_backlog(sk, skb, &drop_reason)) goto discard_and_relse; } bh_unlock_sock(sk); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 1262b790b146..abf0ad547858 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1784,7 +1784,7 @@ process: if (!sock_owned_by_user(sk)) { ret = tcp_v6_do_rcv(sk, skb); } else { - if (tcp_add_backlog(sk, skb)) + if (tcp_add_backlog(sk, skb, &drop_reason)) goto discard_and_relse; } bh_unlock_sock(sk); -- cgit v1.2.3-71-gd317 From 2a968ef60e1fac4e694d9f60ce19a3b66b40e8c3 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sun, 20 Feb 2022 15:06:35 +0800 Subject: net: tcp: use tcp_drop_reason() for tcp_rcv_established() Replace tcp_drop() used in tcp_rcv_established() with tcp_drop_reason(). 
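For context, tcp_drop_reason() was introduced earlier in this series; it is, roughly, the old tcp_drop() with the reason forwarded on, along these lines:

	static void tcp_drop_reason(struct sock *sk, struct sk_buff *skb,
				    enum skb_drop_reason reason)
	{
		sk_drops_add(sk, skb);		/* same per-socket drop accounting as tcp_drop() */
		kfree_skb_reason(skb, reason);	/* tcp_drop() freed via __kfree_skb(), losing the reason */
	}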
Following drop reasons are added: SKB_DROP_REASON_TCP_FLAGS Reviewed-by: Mengen Sun Reviewed-by: Hao Peng Signed-off-by: Menglong Dong Reviewed-by: Eric Dumazet Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/skbuff.h | 1 + include/trace/events/skb.h | 1 + net/ipv4/tcp_input.c | 9 +++++++-- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f7f33c79945b..671db9f49efe 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -362,6 +362,7 @@ enum skb_drop_reason { * backlog (see * LINUX_MIB_TCPBACKLOGDROP) */ + SKB_DROP_REASON_TCP_FLAGS, /* TCP flags invalid */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index bfccd77e9071..d332e7313a61 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -32,6 +32,7 @@ TCP_MD5UNEXPECTED) \ EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE) \ EM(SKB_DROP_REASON_SOCKET_BACKLOG, SOCKET_BACKLOG) \ + EM(SKB_DROP_REASON_TCP_FLAGS, TCP_FLAGS) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0f41fdfd8c27..2d8495a62bc4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5787,6 +5787,7 @@ discard: */ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) { + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; const struct tcphdr *th = (const struct tcphdr *)skb->data; struct tcp_sock *tp = tcp_sk(sk); unsigned int len = skb->len; @@ -5875,6 +5876,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; return; } else { /* Header too small */ + reason = SKB_DROP_REASON_PKT_TOO_SMALL; TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; } @@ -5930,8 +5932,10 @@ slow_path: if (len < (th->doff << 2) || tcp_checksum_complete(skb)) goto csum_error; - if (!th->ack && !th->rst && !th->syn) + if (!th->ack && !th->rst && !th->syn) { + reason = SKB_DROP_REASON_TCP_FLAGS; goto discard; + } /* * Standard slow path. @@ -5957,12 +5961,13 @@ step5: return; csum_error: + reason = SKB_DROP_REASON_TCP_CSUM; trace_tcp_bad_csum(skb); TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); discard: - tcp_drop(sk, skb); + tcp_drop_reason(sk, skb, reason); } EXPORT_SYMBOL(tcp_rcv_established); -- cgit v1.2.3-71-gd317 From a7ec381049c0d1f03e342063d75f5c3b314d0ec2 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sun, 20 Feb 2022 15:06:36 +0800 Subject: net: tcp: use tcp_drop_reason() for tcp_data_queue() Replace tcp_drop() used in tcp_data_queue() with tcp_drop_reason(). Following drop reasons are introduced: SKB_DROP_REASON_TCP_ZEROWINDOW SKB_DROP_REASON_TCP_OLD_DATA SKB_DROP_REASON_TCP_OVERWINDOW SKB_DROP_REASON_TCP_OLD_DATA is used for the case that end_seq of skb less than the left edges of receive window. (Maybe there is a better name?) Reviewed-by: Mengen Sun Reviewed-by: Hao Peng Signed-off-by: Menglong Dong Reviewed-by: Eric Dumazet Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 13 +++++++++++++ include/trace/events/skb.h | 3 +++ net/ipv4/tcp_input.c | 13 +++++++++++-- 3 files changed, 27 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 671db9f49efe..554ef2c848ee 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -363,6 +363,19 @@ enum skb_drop_reason { * LINUX_MIB_TCPBACKLOGDROP) */ SKB_DROP_REASON_TCP_FLAGS, /* TCP flags invalid */ + SKB_DROP_REASON_TCP_ZEROWINDOW, /* TCP receive window size is zero, + * see LINUX_MIB_TCPZEROWINDOWDROP + */ + SKB_DROP_REASON_TCP_OLD_DATA, /* the TCP data has already been + * received before (a spurious retrans + * may have happened), see + * LINUX_MIB_DELAYEDACKLOST + */ + SKB_DROP_REASON_TCP_OVERWINDOW, /* the TCP data is out of window, + * the seq of the first byte exceeds + * the right edge of the receive + * window + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index d332e7313a61..cc1c8f7eaf72 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -33,6 +33,9 @@ EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE) \ EM(SKB_DROP_REASON_SOCKET_BACKLOG, SOCKET_BACKLOG) \ EM(SKB_DROP_REASON_TCP_FLAGS, TCP_FLAGS) \ + EM(SKB_DROP_REASON_TCP_ZEROWINDOW, TCP_ZEROWINDOW) \ + EM(SKB_DROP_REASON_TCP_OLD_DATA, TCP_OLD_DATA) \ + EM(SKB_DROP_REASON_TCP_OVERWINDOW, TCP_OVERWINDOW) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2d8495a62bc4..041c778fd0b5 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4988,6 +4988,7 @@ void tcp_data_ready(struct sock *sk) static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); + enum skb_drop_reason reason; bool fragstolen; int eaten; @@ -5006,6 +5007,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); + reason = SKB_DROP_REASON_NOT_SPECIFIED; tp->rx_opt.dsack = 0; /* Queue data for delivery to the user. @@ -5014,6 +5016,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { if (tcp_receive_window(tp) == 0) { + reason = SKB_DROP_REASON_TCP_ZEROWINDOW; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); goto out_of_window; } @@ -5023,6 +5026,7 @@ queue_and_out: if (skb_queue_len(&sk->sk_receive_queue) == 0) sk_forced_mem_schedule(sk, skb->truesize); else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { + reason = SKB_DROP_REASON_PROTO_MEM; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); sk->sk_data_ready(sk); goto drop; @@ -5059,6 +5063,7 @@ queue_and_out: if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { tcp_rcv_spurious_retrans(sk, skb); /* A retransmit, 2nd most common case. Force an immediate ack. */ + reason = SKB_DROP_REASON_TCP_OLD_DATA; NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); @@ -5066,13 +5071,16 @@ out_of_window: tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); inet_csk_schedule_ack(sk); drop: - tcp_drop(sk, skb); + tcp_drop_reason(sk, skb, reason); return; } /* Out of window. F.e. zero window probe.
*/ - if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) + if (!before(TCP_SKB_CB(skb)->seq, + tp->rcv_nxt + tcp_receive_window(tp))) { + reason = SKB_DROP_REASON_TCP_OVERWINDOW; goto out_of_window; + } if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { /* Partial packet, seq < rcv_next < end_seq */ @@ -5082,6 +5090,7 @@ drop: * remembering D-SACK for its head made in previous line. */ if (!tcp_receive_window(tp)) { + reason = SKB_DROP_REASON_TCP_ZEROWINDOW; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); goto out_of_window; } -- cgit v1.2.3-71-gd317 From d25e481be0c519d1a458b14191dc8c2a8bb3e24a Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sun, 20 Feb 2022 15:06:37 +0800 Subject: net: tcp: use tcp_drop_reason() for tcp_data_queue_ofo() Replace tcp_drop() used in tcp_data_queue_ofo with tcp_drop_reason(). Following drop reasons are introduced: SKB_DROP_REASON_TCP_OFOMERGE Reviewed-by: Mengen Sun Reviewed-by: Hao Peng Signed-off-by: Menglong Dong Reviewed-by: Eric Dumazet Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/skbuff.h | 4 ++++ include/trace/events/skb.h | 1 + net/ipv4/tcp_input.c | 10 ++++++---- 3 files changed, 11 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 554ef2c848ee..a3e90efe6586 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -376,6 +376,10 @@ enum skb_drop_reason { * the right edges of receive * window */ + SKB_DROP_REASON_TCP_OFOMERGE, /* the data of skb is already in + * the ofo queue, corresponding to + * LINUX_MIB_TCPOFOMERGE + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index cc1c8f7eaf72..2ab7193313aa 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -36,6 +36,7 @@ EM(SKB_DROP_REASON_TCP_ZEROWINDOW, TCP_ZEROWINDOW) \ EM(SKB_DROP_REASON_TCP_OLD_DATA, TCP_OLD_DATA) \ EM(SKB_DROP_REASON_TCP_OVERWINDOW, TCP_OVERWINDOW) \ + EM(SKB_DROP_REASON_TCP_OFOMERGE, TCP_OFOMERGE) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 041c778fd0b5..2088f93fa37b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4779,7 +4779,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); sk->sk_data_ready(sk); - tcp_drop(sk, skb); + tcp_drop_reason(sk, skb, SKB_DROP_REASON_PROTO_MEM); return; } @@ -4842,7 +4842,8 @@ coalesce_done: /* All the bits are present. Drop. */ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - tcp_drop(sk, skb); + tcp_drop_reason(sk, skb, + SKB_DROP_REASON_TCP_OFOMERGE); skb = NULL; tcp_dsack_set(sk, seq, end_seq); goto add_sack; @@ -4861,7 +4862,8 @@ coalesce_done: TCP_SKB_CB(skb1)->end_seq); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - tcp_drop(sk, skb1); + tcp_drop_reason(sk, skb1, + SKB_DROP_REASON_TCP_OFOMERGE); goto merge_right; } } else if (tcp_ooo_try_coalesce(sk, skb1, @@ -4889,7 +4891,7 @@ merge_right: tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - tcp_drop(sk, skb1); + tcp_drop_reason(sk, skb1, SKB_DROP_REASON_TCP_OFOMERGE); } /* If there is no skb after us, we are the last_skb ! 
*/ if (!skb1) -- cgit v1.2.3-71-gd317 From 44a3918c8245ab10c6c9719dd12e7a8d291980d8 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 18 Feb 2022 11:49:08 -0800 Subject: x86/speculation: Include unprivileged eBPF status in Spectre v2 mitigation reporting With unprivileged eBPF enabled, eIBRS (without retpoline) is vulnerable to Spectre v2 BHB-based attacks. When both are enabled, print a warning message and report it in the 'spectre_v2' sysfs vulnerabilities file. Signed-off-by: Josh Poimboeuf Signed-off-by: Borislav Petkov Reviewed-by: Thomas Gleixner --- arch/x86/kernel/cpu/bugs.c | 35 +++++++++++++++++++++++++++++------ include/linux/bpf.h | 11 +++++++++++ kernel/sysctl.c | 7 +++++++ 3 files changed, 47 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 79c52dd6c597..0a4267c63d3b 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -650,6 +651,16 @@ static inline const char *spectre_v2_module_string(void) static inline const char *spectre_v2_module_string(void) { return ""; } #endif +#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" + +#ifdef CONFIG_BPF_SYSCALL +void unpriv_ebpf_notify(int new_state) +{ + if (spectre_v2_enabled == SPECTRE_V2_EIBRS && !new_state) + pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); +} +#endif + static inline bool match_option(const char *arg, int arglen, const char *opt) { int len = strlen(opt); @@ -994,6 +1005,9 @@ static void __init spectre_v2_select_mitigation(void) break; } + if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) + pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); + if (spectre_v2_in_eibrs_mode(mode)) { /* Force it so VMEXIT will restore correctly */ x86_spec_ctrl_base |= SPEC_CTRL_IBRS; @@ -1780,6 +1794,20 @@ static char *ibpb_state(void) return ""; } +static ssize_t spectre_v2_show_state(char *buf) +{ + if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) + return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); + + return sprintf(buf, "%s%s%s%s%s%s\n", + spectre_v2_strings[spectre_v2_enabled], + ibpb_state(), + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + stibp_state(), + boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", + spectre_v2_module_string()); +} + static ssize_t srbds_show_state(char *buf) { return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); @@ -1805,12 +1833,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); case X86_BUG_SPECTRE_V2: - return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], - ibpb_state(), - boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", - stibp_state(), - boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? 
", RSB filling" : "", - spectre_v2_module_string()); + return spectre_v2_show_state(buf); case X86_BUG_SPEC_STORE_BYPASS: return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index fa517ae604ad..1f56806d8eb9 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1793,6 +1793,11 @@ struct bpf_core_ctx { int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, int relo_idx, void *insn); +static inline bool unprivileged_ebpf_enabled(void) +{ + return !sysctl_unprivileged_bpf_disabled; +} + #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -2012,6 +2017,12 @@ bpf_jit_find_kfunc_model(const struct bpf_prog *prog, { return NULL; } + +static inline bool unprivileged_ebpf_enabled(void) +{ + return false; +} + #endif /* CONFIG_BPF_SYSCALL */ void __bpf_free_used_btfs(struct bpf_prog_aux *aux, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 5ae443b2882e..730ab56d9e92 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -180,6 +180,10 @@ static int bpf_stats_handler(struct ctl_table *table, int write, return ret; } +void __weak unpriv_ebpf_notify(int new_state) +{ +} + static int bpf_unpriv_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { @@ -197,6 +201,9 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write, return -EPERM; *(int *)table->data = unpriv_enable; } + + unpriv_ebpf_notify(unpriv_enable); + return ret; } #endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */ -- cgit v1.2.3-71-gd317 From 509853f9e1e7b1490dc79f735a5dbafc9298f40d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 11 Feb 2022 19:14:54 +0100 Subject: genirq: Provide generic_handle_irq_safe() Provide generic_handle_irq_safe() which can used from any context. Suggested-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Reviewed-by: Hans de Goede Reviewed-by: Oleksandr Natalenko Reviewed-by: Wolfram Sang Link: https://lore.kernel.org/r/20220211181500.1856198-2-bigeasy@linutronix.de --- include/linux/irqdesc.h | 1 + kernel/irq/irqdesc.c | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+) (limited to 'include/linux') diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 93d270ca0c56..a77584593f7d 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -160,6 +160,7 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc) int handle_irq_desc(struct irq_desc *desc); int generic_handle_irq(unsigned int irq); +int generic_handle_irq_safe(unsigned int irq); #ifdef CONFIG_IRQ_DOMAIN /* diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 2267e6527db3..346d283d2da1 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -662,6 +662,29 @@ int generic_handle_irq(unsigned int irq) } EXPORT_SYMBOL_GPL(generic_handle_irq); +/** + * generic_handle_irq_safe - Invoke the handler for a particular irq from any + * context. + * @irq: The irq number to handle + * + * Returns: 0 on success, a negative value on error. + * + * This function can be called from any context (IRQ or process context). It + * will report an error if not invoked from IRQ context and the irq has been + * marked to enforce IRQ-context only. 
+ */ +int generic_handle_irq_safe(unsigned int irq) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = handle_irq_desc(irq_to_desc(irq)); + local_irq_restore(flags); + return ret; +} +EXPORT_SYMBOL_GPL(generic_handle_irq_safe); + #ifdef CONFIG_IRQ_DOMAIN /** * generic_handle_domain_irq - Invoke the handler for a HW irq belonging -- cgit v1.2.3-71-gd317 From 93dd04ab0b2b32ae6e70284afc764c577156658e Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 18 Feb 2022 14:13:58 +0100 Subject: slab: remove __alloc_size attribute from __kmalloc_track_caller Commit c37495d6254c ("slab: add __alloc_size attributes for better bounds checking") added __alloc_size attributes to a bunch of kmalloc function prototypes. Unfortunately the change to __kmalloc_track_caller seems to cause clang to generate broken code and the first time this is called when booting, the box will crash. While the compiler problems are being reworked and attempted to be solved [1], let's just drop the attribute to solve the issue now. Once it is resolved it can be added back. [1] https://github.com/ClangBuiltLinux/linux/issues/1599 Fixes: c37495d6254c ("slab: add __alloc_size attributes for better bounds checking") Cc: stable Cc: Kees Cook Cc: Daniel Micay Cc: Nick Desaulniers Cc: Christoph Lameter Cc: Pekka Enberg Cc: Joonsoo Kim Cc: Andrew Morton Cc: Vlastimil Babka Cc: Nathan Chancellor Cc: linux-mm@kvack.org Cc: linux-kernel@vger.kernel.org Cc: llvm@lists.linux.dev Signed-off-by: Greg Kroah-Hartman Acked-by: Nick Desaulniers Acked-by: David Rientjes Acked-by: Kees Cook Signed-off-by: Vlastimil Babka Link: https://lore.kernel.org/r/20220218131358.3032912-1-gregkh@linuxfoundation.org --- include/linux/slab.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 37bde99b74af..5b6193fd8bd9 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -660,8 +660,7 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag * allocator where we care about the real place the memory allocation * request comes from. */ -extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) - __alloc_size(1); +extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, _RET_IP_) -- cgit v1.2.3-71-gd317 From 05976c5f3bff8f8b5230da4b39f7cd6dfba9943e Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:31 +0000 Subject: firmware: arm_scmi: Support optional system wide atomic-threshold-us An SCMI agent can be configured system-wide with a well-defined atomic threshold: only SCMI synchronous command whose latency has been advertised by the SCMI platform to be lower or equal to this configured threshold will be considered for atomic operations, when requested and if supported by the underlying transport at all. 
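For illustration, here is roughly how an SCMI clock driver might combine the two pieces of information when deciding whether a clock can be operated atomically. The struct and helper below are hypothetical sketches; only is_transport_atomic() and the enable_latency field come from this series, and both values are assumed to be expressed in microseconds:

	#include <linux/scmi_protocol.h>

	/* Hypothetical per-clock bookkeeping, for illustration only. */
	struct foo_scmi_clk {
		const struct scmi_handle *handle;
		const struct scmi_clock_info *info;
	};

	static bool foo_clk_supports_atomic(struct foo_scmi_clk *clk)
	{
		unsigned int atomic_threshold_us = 0;

		/* The transport itself must be atomic/polling capable... */
		if (!clk->handle->is_transport_atomic(clk->handle,
						      &atomic_threshold_us))
			return false;

		/*
		 * ...and this clock's advertised enable latency must not
		 * exceed the system wide threshold configured in the DT.
		 */
		return clk->info->enable_latency <= atomic_threshold_us;
	}

As the message notes, the final decision is deliberately left to the SCMI drivers; the core only reports the configured threshold.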
Link: https://lore.kernel.org/r/20220217131234.50328-6-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 27 ++++++++++++++++++++++++--- include/linux/scmi_protocol.h | 5 ++++- 2 files changed, 28 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 4fd5a35ffa2f..7436c475e708 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -131,6 +131,12 @@ struct scmi_protocol_instance { * MAX_PROTOCOLS_IMP elements allocated by the base protocol * @active_protocols: IDR storing device_nodes for protocols actually defined * in the DT and confirmed as implemented by fw. + * @atomic_threshold: Optional system wide DT-configured threshold, expressed + * in microseconds, for atomic operations. + * Only SCMI synchronous commands reported by the platform + * to have an execution latency lesser-equal to the threshold + * should be considered for atomic mode operation: such + * decision is finally left up to the SCMI drivers. * @notify_priv: Pointer to private data structure specific to notifications. * @node: List head * @users: Number of users of this instance @@ -149,6 +155,7 @@ struct scmi_info { struct mutex protocols_mtx; u8 *protocols_imp; struct idr active_protocols; + unsigned int atomic_threshold; void *notify_priv; struct list_head node; int users; @@ -1406,15 +1413,22 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id) * SCMI instance is configured as atomic. * * @handle: A reference to the SCMI platform instance. + * @atomic_threshold: An optional return value for the system wide currently + * configured threshold for atomic operations. * * Return: True if transport is configured as atomic */ -static bool scmi_is_transport_atomic(const struct scmi_handle *handle) +static bool scmi_is_transport_atomic(const struct scmi_handle *handle, + unsigned int *atomic_threshold) { + bool ret; struct scmi_info *info = handle_to_scmi_info(handle); - return info->desc->atomic_enabled && - is_transport_polling_capable(info); + ret = info->desc->atomic_enabled && is_transport_polling_capable(info); + if (ret && atomic_threshold) + *atomic_threshold = info->atomic_threshold; + + return ret; } static inline @@ -1954,6 +1968,13 @@ static int scmi_probe(struct platform_device *pdev) handle->version = &info->version; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; + + /* System wide atomic threshold for atomic ops .. if any */ + if (!of_property_read_u32(np, "atomic-threshold-us", + &info->atomic_threshold)) + dev_info(dev, + "SCMI System wide atomic threshold set to %d us\n", + info->atomic_threshold); handle->is_transport_atomic = scmi_is_transport_atomic; if (desc->ops->link_supplier) { diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 9f895cb81818..fdf6bd83cc59 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -619,6 +619,8 @@ struct scmi_notify_ops { * be interested to know if they can assume SCMI * command transactions associated to this handle will * never sleep and act accordingly. + * An optional atomic threshold value could be returned + * where configured. 
* @notify_ops: pointer to set of notifications related operations */ struct scmi_handle { @@ -629,7 +631,8 @@ struct scmi_handle { (*devm_protocol_get)(struct scmi_device *sdev, u8 proto, struct scmi_protocol_handle **ph); void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto); - bool (*is_transport_atomic)(const struct scmi_handle *handle); + bool (*is_transport_atomic)(const struct scmi_handle *handle, + unsigned int *atomic_threshold); const struct scmi_notify_ops *notify_ops; }; -- cgit v1.2.3-71-gd317 From b7bd36f2e9430a58aefdc326f8e6653e9b000243 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:32 +0000 Subject: firmware: arm_scmi: Add atomic support to clock protocol Introduce new _atomic variant for SCMI clock protocol operations related to enable disable operations: when an atomic operation is required the xfer poll_completion flag is set for that transaction. Link: https://lore.kernel.org/r/20220217131234.50328-7-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/clock.c | 22 +++++++++++++++++++--- include/linux/scmi_protocol.h | 3 +++ 2 files changed, 22 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 35b56c8ba0c0..72f930c0e3e2 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -273,7 +273,7 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph, static int scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, - u32 config) + u32 config, bool atomic) { int ret; struct scmi_xfer *t; @@ -284,6 +284,8 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, if (ret) return ret; + t->hdr.poll_completion = atomic; + cfg = t->tx.buf; cfg->id = cpu_to_le32(clk_id); cfg->attributes = cpu_to_le32(config); @@ -296,12 +298,24 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id) { - return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE); + return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false); } static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id) { - return scmi_clock_config_set(ph, clk_id, 0); + return scmi_clock_config_set(ph, clk_id, 0, false); +} + +static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph, + u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true); +} + +static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph, + u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, 0, true); } static int scmi_clock_count_get(const struct scmi_protocol_handle *ph) @@ -330,6 +344,8 @@ static const struct scmi_clk_proto_ops clk_proto_ops = { .rate_set = scmi_clock_rate_set, .enable = scmi_clock_enable, .disable = scmi_clock_disable, + .enable_atomic = scmi_clock_enable_atomic, + .disable_atomic = scmi_clock_disable_atomic, }; static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index fdf6bd83cc59..306e576835f8 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -82,6 +82,9 @@ struct scmi_clk_proto_ops { u64 rate); int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id); int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id); + int (*enable_atomic)(const struct 
scmi_protocol_handle *ph, u32 clk_id); + int (*disable_atomic)(const struct scmi_protocol_handle *ph, + u32 clk_id); }; /** -- cgit v1.2.3-71-gd317 From 18f295b758b227857133f680a397a42e83c62f3f Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Thu, 17 Feb 2022 13:12:33 +0000 Subject: firmware: arm_scmi: Add support for clock_enable_latency An SCMI platform can optionally advertise an enable latency typically associated with a specific clock resource: add support for parsing such optional message field and export such information in the usual publicly accessible clock descriptor. Link: https://lore.kernel.org/r/20220217131234.50328-8-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/clock.c | 12 +++++++++--- include/linux/scmi_protocol.h | 1 + 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 72f930c0e3e2..cf6fed6dec77 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -27,7 +27,8 @@ struct scmi_msg_resp_clock_protocol_attributes { struct scmi_msg_resp_clock_attributes { __le32 attributes; #define CLOCK_ENABLE BIT(0) - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_MAX_STR_SIZE]; + __le32 clock_enable_latency; }; struct scmi_clock_set_config { @@ -116,10 +117,15 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, attr = t->rx.buf; ret = ph->xops->do_xfer(ph, t); - if (!ret) + if (!ret) { strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE); - else + /* Is optional field clock_enable_latency provided ? */ + if (t->rx.len == sizeof(*attr)) + clk->enable_latency = + le32_to_cpu(attr->clock_enable_latency); + } else { clk->name[0] = '\0'; + } ph->xops->xfer_put(ph, t); return ret; diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 306e576835f8..b87551f41f9f 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -42,6 +42,7 @@ struct scmi_revision_info { struct scmi_clock_info { char name[SCMI_MAX_STR_SIZE]; + unsigned int enable_latency; bool rate_discrete; union { struct { -- cgit v1.2.3-71-gd317 From f6c052afe6f802d87c74153b7a57c43b2e9faf07 Mon Sep 17 00:00:00 2001 From: Christophe Kerello Date: Sun, 20 Feb 2022 15:14:31 +0000 Subject: nvmem: core: Fix a conflict between MTD and NVMEM on wp-gpios property Wp-gpios property can be used on NVMEM nodes and the same property can be also used on MTD NAND nodes. In case of the wp-gpios property is defined at NAND level node, the GPIO management is done at NAND driver level. Write protect is disabled when the driver is probed or resumed and is enabled when the driver is released or suspended. When no partitions are defined in the NAND DT node, then the NAND DT node will be passed to NVMEM framework. If wp-gpios property is defined in this node, the GPIO resource is taken twice and the NAND controller driver fails to probe. It would be possible to set config->wp_gpio at MTD level before calling nvmem_register function but NVMEM framework will toggle this GPIO on each write when this GPIO should only be controlled at NAND level driver to ensure that the Write Protect has not been enabled. A way to fix this conflict is to add a new boolean flag in nvmem_config named ignore_wp. In case ignore_wp is set, the GPIO resource will be managed by the provider. 
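As an example, a provider whose parent driver already owns the wp-gpios line could register like this. The driver specifics below are hypothetical; only the ignore_wp flag itself comes from this patch:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/string.h>
	#include <linux/nvmem-provider.h>

	/* Hypothetical read callback; a real one would access the hardware. */
	static int foo_otp_read(void *priv, unsigned int offset,
				void *val, size_t bytes)
	{
		memset(val, 0, bytes);
		return 0;
	}

	static int foo_register_otp(struct device *dev)
	{
		struct nvmem_config config = {
			.dev = dev,
			.name = "foo-otp",
			.read_only = true,
			.size = 256,
			.word_size = 1,
			.stride = 1,
			.reg_read = foo_otp_read,
			/*
			 * The wp-gpios property in this node is requested and
			 * driven by the controller driver itself, so ask the
			 * NVMEM core not to claim the GPIO a second time.
			 */
			.ignore_wp = true,
		};

		return PTR_ERR_OR_ZERO(nvmem_register(&config));
	}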
Fixes: 2a127da461a9 ("nvmem: add support for the write-protect pin") Cc: stable@vger.kernel.org Signed-off-by: Christophe Kerello Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20220220151432.16605-2-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/core.c | 2 +- include/linux/nvmem-provider.h | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 23a38dcf0fc4..9fd1602b539d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -771,7 +771,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (config->wp_gpio) nvmem->wp_gpio = config->wp_gpio; - else + else if (!config->ignore_wp) nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", GPIOD_OUT_HIGH); if (IS_ERR(nvmem->wp_gpio)) { diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 98efb7b5660d..c9a3ac9efeaa 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -70,7 +70,8 @@ struct nvmem_keepout { * @word_size: Minimum read/write access granularity. * @stride: Minimum read/write access stride. * @priv: User context passed to read/write callbacks. - * @wp-gpio: Write protect pin + * @wp-gpio: Write protect pin + * @ignore_wp: Write Protect pin is managed by the provider. * * Note: A default "nvmem" name will be assigned to the device if * no name is specified in its configuration. In such case "" is @@ -92,6 +93,7 @@ struct nvmem_config { enum nvmem_type type; bool read_only; bool root_only; + bool ignore_wp; struct device_node *of_node; bool no_of_node; nvmem_reg_read_t reg_read; -- cgit v1.2.3-71-gd317 From 190fae468592bc2f0efc8b928920f8f712b5831e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sun, 20 Feb 2022 15:15:15 +0000 Subject: nvmem: core: Remove unused devm_nvmem_unregister() There are no users and seems no will come of the devm_nvmem_unregister(). Remove the function and remove the unused devm_nvmem_match() along with it. Signed-off-by: Andy Shevchenko Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20220220151527.17216-2-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/core.c | 22 ---------------------- include/linux/nvmem-provider.h | 8 -------- 2 files changed, 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 23a38dcf0fc4..1a70b3bdb71d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -945,28 +945,6 @@ struct nvmem_device *devm_nvmem_register(struct device *dev, } EXPORT_SYMBOL_GPL(devm_nvmem_register); -static int devm_nvmem_match(struct device *dev, void *res, void *data) -{ - struct nvmem_device **r = res; - - return *r == data; -} - -/** - * devm_nvmem_unregister() - Unregister previously registered managed nvmem - * device. - * - * @dev: Device that uses the nvmem device. - * @nvmem: Pointer to previously registered nvmem device. - * - * Return: Will be negative on error or zero on success. 
- */ -int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) -{ - return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem); -} -EXPORT_SYMBOL(devm_nvmem_unregister); - static struct nvmem_device *__nvmem_device_get(void *data, int (*match)(struct device *dev, const void *data)) { diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 98efb7b5660d..99c01c43d7a8 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -133,8 +133,6 @@ void nvmem_unregister(struct nvmem_device *nvmem); struct nvmem_device *devm_nvmem_register(struct device *dev, const struct nvmem_config *cfg); -int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); - void nvmem_add_cell_table(struct nvmem_cell_table *table); void nvmem_del_cell_table(struct nvmem_cell_table *table); @@ -153,12 +151,6 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c) return nvmem_register(c); } -static inline int -devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) -{ - return -EOPNOTSUPP; -} - static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {} static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {} -- cgit v1.2.3-71-gd317 From 04ec96b768c9dd43946b047c3da60dcc66431370 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Wed, 9 Feb 2022 14:43:25 +0100 Subject: random: make more consistent use of integer types We've been using a flurry of int, unsigned int, size_t, and ssize_t. Let's unify all of this into size_t where it makes sense, as it does in most places, and leave ssize_t for return values with possible errors. In addition, keeping with the convention of other functions in this file, functions that are dealing with raw bytes now take void * consistently instead of a mix of that and u8 *, because much of the time we're actually passing some other structure that is then interpreted as bytes by the function. We also take the opportunity to fix the outdated and incorrect comment in get_random_bytes_arch(). Cc: Theodore Ts'o Reviewed-by: Dominik Brodowski Reviewed-by: Jann Horn Reviewed-by: Eric Biggers Signed-off-by: Jason A. 
Donenfeld --- drivers/char/random.c | 123 +++++++++++++++++++----------------------- include/linux/hw_random.h | 2 +- include/linux/random.h | 10 ++-- include/trace/events/random.h | 79 +++++++++++++-------------- 4 files changed, 99 insertions(+), 115 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index 630b9b9e7d25..768dee5e081a 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -69,7 +69,7 @@ * * The primary kernel interfaces are: * - * void get_random_bytes(void *buf, int nbytes); + * void get_random_bytes(void *buf, size_t nbytes); * u32 get_random_u32() * u64 get_random_u64() * unsigned int get_random_int() @@ -97,14 +97,14 @@ * The current exported interfaces for gathering environmental noise * from the devices are: * - * void add_device_randomness(const void *buf, unsigned int size); + * void add_device_randomness(const void *buf, size_t size); * void add_input_randomness(unsigned int type, unsigned int code, * unsigned int value); * void add_interrupt_randomness(int irq); * void add_disk_randomness(struct gendisk *disk); - * void add_hwgenerator_randomness(const char *buffer, size_t count, + * void add_hwgenerator_randomness(const void *buffer, size_t count, * size_t entropy); - * void add_bootloader_randomness(const void *buf, unsigned int size); + * void add_bootloader_randomness(const void *buf, size_t size); * * add_device_randomness() is for adding data to the random pool that * is likely to differ between two devices (or possibly even per boot). @@ -268,7 +268,7 @@ static int crng_init = 0; #define crng_ready() (likely(crng_init > 1)) static int crng_init_cnt = 0; static void process_random_ready_list(void); -static void _get_random_bytes(void *buf, int nbytes); +static void _get_random_bytes(void *buf, size_t nbytes); static struct ratelimit_state unseeded_warning = RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); @@ -290,7 +290,7 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); static struct { struct blake2s_state hash; spinlock_t lock; - int entropy_count; + unsigned int entropy_count; } input_pool = { .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE), BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4, @@ -308,18 +308,12 @@ static void crng_reseed(void); * update the entropy estimate. The caller should call * credit_entropy_bits if this is appropriate. 
*/ -static void _mix_pool_bytes(const void *in, int nbytes) +static void _mix_pool_bytes(const void *in, size_t nbytes) { blake2s_update(&input_pool.hash, in, nbytes); } -static void __mix_pool_bytes(const void *in, int nbytes) -{ - trace_mix_pool_bytes_nolock(nbytes, _RET_IP_); - _mix_pool_bytes(in, nbytes); -} - -static void mix_pool_bytes(const void *in, int nbytes) +static void mix_pool_bytes(const void *in, size_t nbytes) { unsigned long flags; @@ -383,18 +377,18 @@ static void process_random_ready_list(void) spin_unlock_irqrestore(&random_ready_list_lock, flags); } -static void credit_entropy_bits(int nbits) +static void credit_entropy_bits(size_t nbits) { - int entropy_count, orig; + unsigned int entropy_count, orig, add; - if (nbits <= 0) + if (!nbits) return; - nbits = min(nbits, POOL_BITS); + add = min_t(size_t, nbits, POOL_BITS); do { orig = READ_ONCE(input_pool.entropy_count); - entropy_count = min(POOL_BITS, orig + nbits); + entropy_count = min_t(unsigned int, POOL_BITS, orig + add); } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig); trace_credit_entropy_bits(nbits, entropy_count, _RET_IP_); @@ -443,10 +437,10 @@ static void invalidate_batched_entropy(void); * path. So we can't afford to dilly-dally. Returns the number of * bytes processed from cp. */ -static size_t crng_fast_load(const u8 *cp, size_t len) +static size_t crng_fast_load(const void *cp, size_t len) { unsigned long flags; - u8 *p; + const u8 *src = (const u8 *)cp; size_t ret = 0; if (!spin_trylock_irqsave(&base_crng.lock, flags)) @@ -455,10 +449,9 @@ static size_t crng_fast_load(const u8 *cp, size_t len) spin_unlock_irqrestore(&base_crng.lock, flags); return 0; } - p = base_crng.key; while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) { - p[crng_init_cnt % sizeof(base_crng.key)] ^= *cp; - cp++; crng_init_cnt++; len--; ret++; + base_crng.key[crng_init_cnt % sizeof(base_crng.key)] ^= *src; + src++; crng_init_cnt++; len--; ret++; } if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { invalidate_batched_entropy(); @@ -482,7 +475,7 @@ static size_t crng_fast_load(const u8 *cp, size_t len) * something like a fixed DMI table (for example), which might very * well be unique to the machine, but is otherwise unvarying. */ -static void crng_slow_load(const u8 *cp, size_t len) +static void crng_slow_load(const void *cp, size_t len) { unsigned long flags; struct blake2s_state hash; @@ -656,14 +649,15 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) { bool large_request = nbytes > 256; - ssize_t ret = 0, len; + ssize_t ret = 0; + size_t len; u32 chacha_state[CHACHA_STATE_WORDS]; u8 output[CHACHA_BLOCK_SIZE]; if (!nbytes) return 0; - len = min_t(ssize_t, 32, nbytes); + len = min_t(size_t, 32, nbytes); crng_make_state(chacha_state, output, len); if (copy_to_user(buf, output, len)) @@ -683,7 +677,7 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) if (unlikely(chacha_state[12] == 0)) ++chacha_state[13]; - len = min_t(ssize_t, nbytes, CHACHA_BLOCK_SIZE); + len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE); if (copy_to_user(buf, output, len)) { ret = -EFAULT; break; @@ -721,7 +715,7 @@ struct timer_rand_state { * the entropy pool having similar initial state across largely * identical devices. 
*/ -void add_device_randomness(const void *buf, unsigned int size) +void add_device_randomness(const void *buf, size_t size) { unsigned long time = random_get_entropy() ^ jiffies; unsigned long flags; @@ -749,7 +743,7 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; * keyboard scan codes, and 256 upwards for interrupts. * */ -static void add_timer_randomness(struct timer_rand_state *state, unsigned num) +static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) { struct { long jiffies; @@ -793,7 +787,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * Round down by 1 bit on general principles, * and limit entropy estimate to 12 bits. */ - credit_entropy_bits(min_t(int, fls(delta >> 1), 11)); + credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11)); } void add_input_randomness(unsigned int type, unsigned int code, @@ -874,8 +868,8 @@ void add_interrupt_randomness(int irq) add_interrupt_bench(cycles); if (unlikely(crng_init == 0)) { - if ((fast_pool->count >= 64) && - crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) { + if (fast_pool->count >= 64 && + crng_fast_load(fast_pool->pool, sizeof(fast_pool->pool)) > 0) { fast_pool->count = 0; fast_pool->last = now; if (spin_trylock(&input_pool.lock)) { @@ -893,7 +887,7 @@ void add_interrupt_randomness(int irq) return; fast_pool->last = now; - __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool)); + _mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool)); spin_unlock(&input_pool.lock); fast_pool->count = 0; @@ -1002,18 +996,18 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void * wait_for_random_bytes() should be called and return 0 at least once * at any point prior. */ -static void _get_random_bytes(void *buf, int nbytes) +static void _get_random_bytes(void *buf, size_t nbytes) { u32 chacha_state[CHACHA_STATE_WORDS]; u8 tmp[CHACHA_BLOCK_SIZE]; - ssize_t len; + size_t len; trace_get_random_bytes(nbytes, _RET_IP_); if (!nbytes) return; - len = min_t(ssize_t, 32, nbytes); + len = min_t(size_t, 32, nbytes); crng_make_state(chacha_state, buf, len); nbytes -= len; buf += len; @@ -1036,7 +1030,7 @@ static void _get_random_bytes(void *buf, int nbytes) memzero_explicit(chacha_state, sizeof(chacha_state)); } -void get_random_bytes(void *buf, int nbytes) +void get_random_bytes(void *buf, size_t nbytes) { static void *previous; @@ -1197,25 +1191,19 @@ EXPORT_SYMBOL(del_random_ready_callback); /* * This function will use the architecture-specific hardware random - * number generator if it is available. The arch-specific hw RNG will - * almost certainly be faster than what we can do in software, but it - * is impossible to verify that it is implemented securely (as - * opposed, to, say, the AES encryption of a sequence number using a - * key known by the NSA). So it's useful if we need the speed, but - * only if we're willing to trust the hardware manufacturer not to - * have put in a back door. - * - * Return number of bytes filled in. + * number generator if it is available. It is not recommended for + * use. Use get_random_bytes() instead. It returns the number of + * bytes filled in. 
*/ -int __must_check get_random_bytes_arch(void *buf, int nbytes) +size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes) { - int left = nbytes; + size_t left = nbytes; u8 *p = buf; trace_get_random_bytes_arch(left, _RET_IP_); while (left) { unsigned long v; - int chunk = min_t(int, left, sizeof(unsigned long)); + size_t chunk = min_t(size_t, left, sizeof(unsigned long)); if (!arch_get_random_long(&v)) break; @@ -1248,12 +1236,12 @@ early_param("random.trust_cpu", parse_trust_cpu); */ int __init rand_initialize(void) { - int i; + size_t i; ktime_t now = ktime_get_real(); bool arch_init = true; unsigned long rv; - for (i = BLAKE2S_BLOCK_SIZE; i > 0; i -= sizeof(rv)) { + for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) { if (!arch_get_random_seed_long_early(&rv) && !arch_get_random_long_early(&rv)) { rv = random_get_entropy(); @@ -1302,7 +1290,7 @@ static ssize_t urandom_read_nowarn(struct file *file, char __user *buf, nbytes = min_t(size_t, nbytes, INT_MAX >> 6); ret = get_random_bytes_user(buf, nbytes); - trace_urandom_read(8 * nbytes, 0, input_pool.entropy_count); + trace_urandom_read(nbytes, input_pool.entropy_count); return ret; } @@ -1346,19 +1334,18 @@ static __poll_t random_poll(struct file *file, poll_table *wait) return mask; } -static int write_pool(const char __user *buffer, size_t count) +static int write_pool(const char __user *ubuf, size_t count) { - size_t bytes; - u8 buf[BLAKE2S_BLOCK_SIZE]; - const char __user *p = buffer; + size_t len; + u8 block[BLAKE2S_BLOCK_SIZE]; - while (count > 0) { - bytes = min(count, sizeof(buf)); - if (copy_from_user(buf, p, bytes)) + while (count) { + len = min(count, sizeof(block)); + if (copy_from_user(block, ubuf, len)) return -EFAULT; - count -= bytes; - p += bytes; - mix_pool_bytes(buf, bytes); + count -= len; + ubuf += len; + mix_pool_bytes(block, len); cond_resched(); } @@ -1368,7 +1355,7 @@ static int write_pool(const char __user *buffer, size_t count) static ssize_t random_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - size_t ret; + int ret; ret = write_pool(buffer, count); if (ret) @@ -1464,8 +1451,6 @@ const struct file_operations urandom_fops = { SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, flags) { - int ret; - if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) return -EINVAL; @@ -1480,6 +1465,8 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, count = INT_MAX; if (!(flags & GRND_INSECURE) && !crng_ready()) { + int ret; + if (flags & GRND_NONBLOCK) return -EAGAIN; ret = wait_for_random_bytes(); @@ -1751,7 +1738,7 @@ unsigned long randomize_page(unsigned long start, unsigned long range) * Those devices may produce endless random bits and will be throttled * when our pool is full. */ -void add_hwgenerator_randomness(const char *buffer, size_t count, +void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy) { if (unlikely(crng_init == 0)) { @@ -1782,7 +1769,7 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); * it would be regarded as device data. * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. 
*/ -void add_bootloader_randomness(const void *buf, unsigned int size) +void add_bootloader_randomness(const void *buf, size_t size) { if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER)) add_hwgenerator_randomness(buf, size, size * 8); diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 8e6dd908da21..1a9fc38f8938 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -61,6 +61,6 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng); extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); /** Feed random bits into the pool. */ -extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy); +extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); #endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/random.h b/include/linux/random.h index c45b2693e51f..e92efb39779c 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -20,8 +20,8 @@ struct random_ready_callback { struct module *owner; }; -extern void add_device_randomness(const void *, unsigned int); -extern void add_bootloader_randomness(const void *, unsigned int); +extern void add_device_randomness(const void *, size_t); +extern void add_bootloader_randomness(const void *, size_t); #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) @@ -37,13 +37,13 @@ extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) __latent_entropy; extern void add_interrupt_randomness(int irq) __latent_entropy; -extern void get_random_bytes(void *buf, int nbytes); +extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); extern int __init rand_initialize(void); extern bool rng_is_initialized(void); extern int add_random_ready_callback(struct random_ready_callback *rdy); extern void del_random_ready_callback(struct random_ready_callback *rdy); -extern int __must_check get_random_bytes_arch(void *buf, int nbytes); +extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes); #ifndef MODULE extern const struct file_operations random_fops, urandom_fops; @@ -87,7 +87,7 @@ static inline unsigned long get_random_canary(void) /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). * Returns the result of the call to wait_for_random_bytes. 
*/ -static inline int get_random_bytes_wait(void *buf, int nbytes) +static inline int get_random_bytes_wait(void *buf, size_t nbytes) { int ret = wait_for_random_bytes(); get_random_bytes(buf, nbytes); diff --git a/include/trace/events/random.h b/include/trace/events/random.h index ad149aeaf42c..0609a2810a12 100644 --- a/include/trace/events/random.h +++ b/include/trace/events/random.h @@ -9,13 +9,13 @@ #include TRACE_EVENT(add_device_randomness, - TP_PROTO(int bytes, unsigned long IP), + TP_PROTO(size_t bytes, unsigned long IP), TP_ARGS(bytes, IP), TP_STRUCT__entry( - __field( int, bytes ) - __field(unsigned long, IP ) + __field(size_t, bytes ) + __field(unsigned long, IP ) ), TP_fast_assign( @@ -23,18 +23,18 @@ TRACE_EVENT(add_device_randomness, __entry->IP = IP; ), - TP_printk("bytes %d caller %pS", + TP_printk("bytes %zu caller %pS", __entry->bytes, (void *)__entry->IP) ); DECLARE_EVENT_CLASS(random__mix_pool_bytes, - TP_PROTO(int bytes, unsigned long IP), + TP_PROTO(size_t bytes, unsigned long IP), TP_ARGS(bytes, IP), TP_STRUCT__entry( - __field( int, bytes ) - __field(unsigned long, IP ) + __field(size_t, bytes ) + __field(unsigned long, IP ) ), TP_fast_assign( @@ -42,12 +42,12 @@ DECLARE_EVENT_CLASS(random__mix_pool_bytes, __entry->IP = IP; ), - TP_printk("input pool: bytes %d caller %pS", + TP_printk("input pool: bytes %zu caller %pS", __entry->bytes, (void *)__entry->IP) ); DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes, - TP_PROTO(int bytes, unsigned long IP), + TP_PROTO(size_t bytes, unsigned long IP), TP_ARGS(bytes, IP) ); @@ -59,13 +59,13 @@ DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock, ); TRACE_EVENT(credit_entropy_bits, - TP_PROTO(int bits, int entropy_count, unsigned long IP), + TP_PROTO(size_t bits, size_t entropy_count, unsigned long IP), TP_ARGS(bits, entropy_count, IP), TP_STRUCT__entry( - __field( int, bits ) - __field( int, entropy_count ) + __field(size_t, bits ) + __field(size_t, entropy_count ) __field(unsigned long, IP ) ), @@ -75,34 +75,34 @@ TRACE_EVENT(credit_entropy_bits, __entry->IP = IP; ), - TP_printk("input pool: bits %d entropy_count %d caller %pS", + TP_printk("input pool: bits %zu entropy_count %zu caller %pS", __entry->bits, __entry->entropy_count, (void *)__entry->IP) ); TRACE_EVENT(add_input_randomness, - TP_PROTO(int input_bits), + TP_PROTO(size_t input_bits), TP_ARGS(input_bits), TP_STRUCT__entry( - __field( int, input_bits ) + __field(size_t, input_bits ) ), TP_fast_assign( __entry->input_bits = input_bits; ), - TP_printk("input_pool_bits %d", __entry->input_bits) + TP_printk("input_pool_bits %zu", __entry->input_bits) ); TRACE_EVENT(add_disk_randomness, - TP_PROTO(dev_t dev, int input_bits), + TP_PROTO(dev_t dev, size_t input_bits), TP_ARGS(dev, input_bits), TP_STRUCT__entry( - __field( dev_t, dev ) - __field( int, input_bits ) + __field(dev_t, dev ) + __field(size_t, input_bits ) ), TP_fast_assign( @@ -110,17 +110,17 @@ TRACE_EVENT(add_disk_randomness, __entry->input_bits = input_bits; ), - TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev), + TP_printk("dev %d,%d input_pool_bits %zu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->input_bits) ); DECLARE_EVENT_CLASS(random__get_random_bytes, - TP_PROTO(int nbytes, unsigned long IP), + TP_PROTO(size_t nbytes, unsigned long IP), TP_ARGS(nbytes, IP), TP_STRUCT__entry( - __field( int, nbytes ) + __field(size_t, nbytes ) __field(unsigned long, IP ) ), @@ -129,29 +129,29 @@ DECLARE_EVENT_CLASS(random__get_random_bytes, __entry->IP = IP; ), - TP_printk("nbytes %d 
caller %pS", __entry->nbytes, (void *)__entry->IP) + TP_printk("nbytes %zu caller %pS", __entry->nbytes, (void *)__entry->IP) ); DEFINE_EVENT(random__get_random_bytes, get_random_bytes, - TP_PROTO(int nbytes, unsigned long IP), + TP_PROTO(size_t nbytes, unsigned long IP), TP_ARGS(nbytes, IP) ); DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch, - TP_PROTO(int nbytes, unsigned long IP), + TP_PROTO(size_t nbytes, unsigned long IP), TP_ARGS(nbytes, IP) ); DECLARE_EVENT_CLASS(random__extract_entropy, - TP_PROTO(int nbytes, int entropy_count), + TP_PROTO(size_t nbytes, size_t entropy_count), TP_ARGS(nbytes, entropy_count), TP_STRUCT__entry( - __field( int, nbytes ) - __field( int, entropy_count ) + __field( size_t, nbytes ) + __field( size_t, entropy_count ) ), TP_fast_assign( @@ -159,37 +159,34 @@ DECLARE_EVENT_CLASS(random__extract_entropy, __entry->entropy_count = entropy_count; ), - TP_printk("input pool: nbytes %d entropy_count %d", + TP_printk("input pool: nbytes %zu entropy_count %zu", __entry->nbytes, __entry->entropy_count) ); DEFINE_EVENT(random__extract_entropy, extract_entropy, - TP_PROTO(int nbytes, int entropy_count), + TP_PROTO(size_t nbytes, size_t entropy_count), TP_ARGS(nbytes, entropy_count) ); TRACE_EVENT(urandom_read, - TP_PROTO(int got_bits, int pool_left, int input_left), + TP_PROTO(size_t nbytes, size_t entropy_count), - TP_ARGS(got_bits, pool_left, input_left), + TP_ARGS(nbytes, entropy_count), TP_STRUCT__entry( - __field( int, got_bits ) - __field( int, pool_left ) - __field( int, input_left ) + __field( size_t, nbytes ) + __field( size_t, entropy_count ) ), TP_fast_assign( - __entry->got_bits = got_bits; - __entry->pool_left = pool_left; - __entry->input_left = input_left; + __entry->nbytes = nbytes; + __entry->entropy_count = entropy_count; ), - TP_printk("got_bits %d nonblocking_pool_entropy_left %d " - "input_entropy_left %d", __entry->got_bits, - __entry->pool_left, __entry->input_left) + TP_printk("reading: nbytes %zu entropy_count %zu", + __entry->nbytes, __entry->entropy_count) ); TRACE_EVENT(prandom_u32, -- cgit v1.2.3-71-gd317 From 6071a6c0fba2d747742cadcbb3ba26ed756ed73b Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Fri, 11 Feb 2022 12:28:33 +0100 Subject: random: remove useless header comment This really adds nothing at all useful. Cc: Theodore Ts'o Reviewed-by: Dominik Brodowski Reviewed-by: Eric Biggers Signed-off-by: Jason A. Donenfeld --- include/linux/random.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/random.h b/include/linux/random.h index e92efb39779c..37e1e8c43d7e 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -1,9 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * include/linux/random.h - * - * Include file for the random number generator. - */ + #ifndef _LINUX_RANDOM_H #define _LINUX_RANDOM_H -- cgit v1.2.3-71-gd317 From b777c38239fec5a528e59f55b379e31b1a187524 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 13 Feb 2022 16:17:01 +0100 Subject: random: pull add_hwgenerator_randomness() declaration into random.h add_hwgenerator_randomness() is a function implemented and documented inside of random.c. It is the way that hardware RNGs push data into it. Therefore, it should be declared in random.h. Otherwise sparse complains with: random.c:1137:6: warning: symbol 'add_hwgenerator_randomness' was not declared. Should it be static? 
The alternative would be to include hw_random.h into random.c, but that wouldn't really be good for anything except slowing down compile time. Cc: Matt Mackall Cc: Theodore Ts'o Acked-by: Herbert Xu Reviewed-by: Eric Biggers Reviewed-by: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/hw_random/core.c | 1 + include/linux/hw_random.h | 2 -- include/linux/random.h | 2 ++ 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index a3db27916256..cfb085de876b 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 1a9fc38f8938..aa1d4da03538 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -60,7 +60,5 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng); /** Unregister a Hardware Random Number Generator driver. */ extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); -/** Feed random bits into the pool. */ -extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); #endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/random.h b/include/linux/random.h index 37e1e8c43d7e..d7354de9351e 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -32,6 +32,8 @@ static inline void add_latent_entropy(void) {} extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) __latent_entropy; extern void add_interrupt_randomness(int irq) __latent_entropy; +extern void add_hwgenerator_randomness(const void *buffer, size_t count, + size_t entropy); extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); -- cgit v1.2.3-71-gd317 From 3191dd5a1179ef0fad5a050a1702ae98b6251e8f Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 13 Feb 2022 22:48:04 +0100 Subject: random: clear fast pool, crng, and batches in cpuhp bring up For the irq randomness fast pool, rather than having to use expensive atomics, which were visibly the most expensive thing in the entire irq handler, simply take care of the extreme edge case of resetting count to zero in the cpuhp online handler, just after workqueues have been reenabled. This simplifies the code a bit and lets us use vanilla variables rather than atomics, and performance should be improved. As well, very early on when the CPU comes up, while interrupts are still disabled, we clear out the per-cpu crng and its batches, so that it always starts with fresh randomness. Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Theodore Ts'o Cc: Sultan Alsawaf Cc: Dominik Brodowski Acked-by: Sebastian Andrzej Siewior Signed-off-by: Jason A. 
Donenfeld --- drivers/char/random.c | 62 +++++++++++++++++++++++++++++++++++----------- include/linux/cpuhotplug.h | 2 ++ include/linux/random.h | 5 ++++ kernel/cpu.c | 11 ++++++++ 4 files changed, 65 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index bca4467e540f..d73a75cbe82d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -698,6 +698,25 @@ u32 get_random_u32(void) } EXPORT_SYMBOL(get_random_u32); +#ifdef CONFIG_SMP +/* + * This function is called when the CPU is coming up, with entry + * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP. + */ +int random_prepare_cpu(unsigned int cpu) +{ + /* + * When the cpu comes back online, immediately invalidate both + * the per-cpu crng and all batches, so that we serve fresh + * randomness. + */ + per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX; + per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX; + per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX; + return 0; +} +#endif + /** * randomize_page - Generate a random, page aligned address * @start: The smallest acceptable address the caller will take. @@ -1183,7 +1202,7 @@ struct fast_pool { }; struct work_struct mix; unsigned long last; - atomic_t count; + unsigned int count; u16 reg_idx; }; @@ -1219,6 +1238,29 @@ static void fast_mix(u32 pool[4]) static DEFINE_PER_CPU(struct fast_pool, irq_randomness); +#ifdef CONFIG_SMP +/* + * This function is called when the CPU has just come online, with + * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE. + */ +int random_online_cpu(unsigned int cpu) +{ + /* + * During CPU shutdown and before CPU onlining, add_interrupt_ + * randomness() may schedule mix_interrupt_randomness(), and + * set the MIX_INFLIGHT flag. However, because the worker can + * be scheduled on a different CPU during this period, that + * flag will never be cleared. For that reason, we zero out + * the flag here, which runs just after workqueues are onlined + * for the CPU again. This also has the effect of setting the + * irq randomness count to zero so that new accumulated irqs + * are fresh. + */ + per_cpu_ptr(&irq_randomness, cpu)->count = 0; + return 0; +} +#endif + static u32 get_reg(struct fast_pool *f, struct pt_regs *regs) { u32 *ptr = (u32 *)regs; @@ -1243,15 +1285,6 @@ static void mix_interrupt_randomness(struct work_struct *work) local_irq_disable(); if (fast_pool != this_cpu_ptr(&irq_randomness)) { local_irq_enable(); - /* - * If we are unlucky enough to have been moved to another CPU, - * during CPU hotplug while the CPU was shutdown then we set - * our count to zero atomically so that when the CPU comes - * back online, it can enqueue work again. The _release here - * pairs with the atomic_inc_return_acquire in - * add_interrupt_randomness(). - */ - atomic_set_release(&fast_pool->count, 0); return; } @@ -1260,7 +1293,7 @@ static void mix_interrupt_randomness(struct work_struct *work) * consistent view, before we reenable irqs again. */ memcpy(pool, fast_pool->pool32, sizeof(pool)); - atomic_set(&fast_pool->count, 0); + fast_pool->count = 0; fast_pool->last = jiffies; local_irq_enable(); @@ -1296,14 +1329,13 @@ void add_interrupt_randomness(int irq) } fast_mix(fast_pool->pool32); - /* The _acquire here pairs with the atomic_set_release in mix_interrupt_randomness(). 
*/ - new_count = (unsigned int)atomic_inc_return_acquire(&fast_pool->count); + new_count = ++fast_pool->count; if (unlikely(crng_init == 0)) { if (new_count >= 64 && crng_pre_init_inject(fast_pool->pool32, sizeof(fast_pool->pool32), true, true) > 0) { - atomic_set(&fast_pool->count, 0); + fast_pool->count = 0; fast_pool->last = now; if (spin_trylock(&input_pool.lock)) { _mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32)); @@ -1321,7 +1353,7 @@ void add_interrupt_randomness(int irq) if (unlikely(!fast_pool->mix.func)) INIT_WORK(&fast_pool->mix, mix_interrupt_randomness); - atomic_or(MIX_INFLIGHT, &fast_pool->count); + fast_pool->count |= MIX_INFLIGHT; queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix); } EXPORT_SYMBOL_GPL(add_interrupt_randomness); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 411a428ace4d..481e565cc5c4 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -100,6 +100,7 @@ enum cpuhp_state { CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, CPUHP_AP_DTPM_CPU_DEAD, + CPUHP_RANDOM_PREPARE, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, @@ -240,6 +241,7 @@ enum cpuhp_state { CPUHP_AP_PERF_CSKY_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, + CPUHP_AP_RANDOM_ONLINE, CPUHP_AP_RCUTREE_ONLINE, CPUHP_AP_BASE_CACHEINFO_ONLINE, CPUHP_AP_ONLINE_DYN, diff --git a/include/linux/random.h b/include/linux/random.h index d7354de9351e..6148b8d1ccf3 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -156,4 +156,9 @@ static inline bool __init arch_get_random_long_early(unsigned long *v) } #endif +#ifdef CONFIG_SMP +extern int random_prepare_cpu(unsigned int cpu); +extern int random_online_cpu(unsigned int cpu); +#endif + #endif /* _LINUX_RANDOM_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 407a2568f35e..238cba15449f 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -1659,6 +1660,11 @@ static struct cpuhp_step cpuhp_hp_states[] = { .startup.single = perf_event_init_cpu, .teardown.single = perf_event_exit_cpu, }, + [CPUHP_RANDOM_PREPARE] = { + .name = "random:prepare", + .startup.single = random_prepare_cpu, + .teardown.single = NULL, + }, [CPUHP_WORKQUEUE_PREP] = { .name = "workqueue:prepare", .startup.single = workqueue_prepare_cpu, @@ -1782,6 +1788,11 @@ static struct cpuhp_step cpuhp_hp_states[] = { .startup.single = workqueue_online_cpu, .teardown.single = workqueue_offline_cpu, }, + [CPUHP_AP_RANDOM_ONLINE] = { + .name = "random:online", + .startup.single = random_online_cpu, + .teardown.single = NULL, + }, [CPUHP_AP_RCUTREE_ONLINE] = { .name = "RCU/tree:online", .startup.single = rcutree_online_cpu, -- cgit v1.2.3-71-gd317 From 0fbb4d93b38bce1f8235aacfa37e90ad8f011473 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 17 Feb 2022 23:40:32 -0500 Subject: dm: add dm_submit_bio_remap interface Where possible, switch from early bio-based IO accounting (at the time DM clones each incoming bio) to late IO accounting just before each remapped bio is issued to underlying device via submit_bio_noacct(). Allows more precise bio-based IO accounting for DM targets that use their own workqueues to perform additional processing of each bio in conjunction with their DM_MAPIO_SUBMITTED return from their map function. When a target is updated to use dm_submit_bio_remap() they must also set ti->accounts_remapped_io to true. 
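In practice a bio-based target would opt in and submit along these lines. The target name and the device-lookup helper are hypothetical; the interface calls match this patch:

	#include <linux/device-mapper.h>

	/* Hypothetical helper returning the target's underlying device. */
	extern struct dm_dev *foo_get_dev(struct dm_target *ti);

	static int foo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* Opt in to the late accounting done by dm_submit_bio_remap(). */
		ti->accounts_remapped_io = true;
		return 0;
	}

	static int foo_map(struct dm_target *ti, struct bio *bio)
	{
		/*
		 * Take ownership of the clone: remap it and submit it directly
		 * from the map function (from_wq = false). A target that hands
		 * the bio to its own workqueue would call dm_submit_bio_remap()
		 * from the worker with from_wq = true instead.
		 */
		bio_set_dev(bio, foo_get_dev(ti)->bdev);
		dm_submit_bio_remap(bio, NULL, false);
		return DM_MAPIO_SUBMITTED;
	}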
Use xchg() in start_io_acct(), as suggested by Mikulas, to ensure each IO is only started once. The xchg race only happens if __send_duplicate_bios() sends multiple bios -- that case is reflected via tio->is_duplicate_bio. Given the niche nature of this race, it is best to avoid any xchg performance penalty for normal IO. For IO that was never submitted with dm_submit_bio_remap(), but the target completes the clone with bio_endio, accounting is started then ended and pending_io counter decremented. Reviewed-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-core.h | 2 + drivers/md/dm.c | 127 +++++++++++++++++++++++++++++++++++------- include/linux/device-mapper.h | 7 +++ include/uapi/linux/dm-ioctl.h | 4 +- 4 files changed, 118 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 33ef92e90462..8078b6c155ef 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -232,6 +232,8 @@ struct dm_io { struct mapped_device *md; struct bio *orig_bio; blk_status_t status; + bool start_io_acct:1; + int was_accounted; unsigned long start_time; spinlock_t endio_lock; struct dm_stats_aux stats_aux; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d1c618c3f6c6..082366d0ad49 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -518,14 +518,33 @@ static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio, bio->bi_iter.bi_size = bi_size; } -static void dm_start_io_acct(struct dm_io *io) +static void __dm_start_io_acct(struct dm_io *io, struct bio *bio) { - dm_io_acct(false, io->md, io->orig_bio, io->start_time, &io->stats_aux); + dm_io_acct(false, io->md, bio, io->start_time, &io->stats_aux); } -static void dm_end_io_acct(struct dm_io *io) +static void dm_start_io_acct(struct dm_io *io, struct bio *clone) { - dm_io_acct(true, io->md, io->orig_bio, io->start_time, &io->stats_aux); + /* Must account IO to DM device in terms of orig_bio */ + struct bio *bio = io->orig_bio; + + /* + * Ensure IO accounting is only ever started once. + * Expect no possibility for race unless is_duplicate_bio.
+ */ + if (!clone || likely(!clone_to_tio(clone)->is_duplicate_bio)) { + if (WARN_ON(io->was_accounted)) + return; + io->was_accounted = 1; + } else if (xchg(&io->was_accounted, 1) == 1) + return; + + __dm_start_io_acct(io, bio); +} + +static void dm_end_io_acct(struct dm_io *io, struct bio *bio) +{ + dm_io_acct(true, io->md, bio, io->start_time, &io->stats_aux); } static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) @@ -545,11 +564,13 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) io->status = 0; atomic_set(&io->io_count, 1); this_cpu_inc(*md->pending_io); - io->orig_bio = bio; + io->orig_bio = NULL; io->md = md; spin_lock_init(&io->endio_lock); io->start_time = jiffies; + io->start_io_acct = false; + io->was_accounted = 0; dm_stats_record_start(&md->stats, &io->stats_aux); @@ -849,7 +870,16 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error) } io_error = io->status; - dm_end_io_acct(io); + if (io->was_accounted) + dm_end_io_acct(io, bio); + else if (!io_error) { + /* + * Must handle target that DM_MAPIO_SUBMITTED only to + * then bio_endio() rather than dm_submit_bio_remap() + */ + __dm_start_io_acct(io, bio); + dm_end_io_acct(io, bio); + } free_io(io); smp_wmb(); this_cpu_dec(*md->pending_io); @@ -1131,6 +1161,56 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) } EXPORT_SYMBOL_GPL(dm_accept_partial_bio); +static inline void __dm_submit_bio_remap(struct bio *clone, + dev_t dev, sector_t old_sector) +{ + trace_block_bio_remap(clone, dev, old_sector); + submit_bio_noacct(clone); +} + +/* + * @clone: clone bio that DM core passed to target's .map function + * @tgt_clone: clone of @clone bio that target needs submitted + * @from_wq: caller is a workqueue thread managed by DM target + * + * Targets should use this interface to submit bios they take + * ownership of when returning DM_MAPIO_SUBMITTED. + * + * Target should also enable ti->accounts_remapped_io + */ +void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone, + bool from_wq) +{ + struct dm_target_io *tio = clone_to_tio(clone); + struct dm_io *io = tio->io; + + /* establish bio that will get submitted */ + if (!tgt_clone) + tgt_clone = clone; + + /* + * Account io->origin_bio to DM dev on behalf of target + * that took ownership of IO with DM_MAPIO_SUBMITTED. + */ + if (!from_wq) { + /* Still in target's map function */ + io->start_io_acct = true; + } else { + /* + * Called by another thread, managed by DM target, + * wait for dm_split_and_process_bio() to store + * io->orig_bio + */ + while (unlikely(!smp_load_acquire(&io->orig_bio))) + msleep(1); + dm_start_io_acct(io, clone); + } + + __dm_submit_bio_remap(tgt_clone, disk_devt(io->md->disk), + tio->old_sector); +} +EXPORT_SYMBOL_GPL(dm_submit_bio_remap); + static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) { mutex_lock(&md->swap_bios_lock); @@ -1157,9 +1237,7 @@ static void __map_bio(struct bio *clone) clone->bi_end_io = clone_endio; /* - * Map the clone. If r == 0 we don't need to do - * anything, the target has assumed ownership of - * this io. + * Map the clone. 
*/ dm_io_inc_pending(io); tio->old_sector = clone->bi_iter.bi_sector; @@ -1184,12 +1262,18 @@ static void __map_bio(struct bio *clone) switch (r) { case DM_MAPIO_SUBMITTED: + /* target has assumed ownership of this io */ + if (!ti->accounts_remapped_io) + io->start_io_acct = true; break; case DM_MAPIO_REMAPPED: - /* the bio has been remapped so dispatch it */ - trace_block_bio_remap(clone, bio_dev(io->orig_bio), + /* + * the bio has been remapped so dispatch it, but defer + * dm_start_io_acct() until after possible bio_split(). + */ + __dm_submit_bio_remap(clone, disk_devt(io->md->disk), tio->old_sector); - submit_bio_noacct(clone); + io->start_io_acct = true; break; case DM_MAPIO_KILL: case DM_MAPIO_REQUEUE: @@ -1404,7 +1488,7 @@ static void dm_split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { struct clone_info ci; - struct bio *b; + struct bio *orig_bio = NULL; int error = 0; init_clone_info(&ci, md, map, bio); @@ -1426,15 +1510,18 @@ static void dm_split_and_process_bio(struct mapped_device *md, * used by dm_end_io_acct() and for dm_io_dec_pending() to use for * completion handling. */ - b = bio_split(bio, bio_sectors(bio) - ci.sector_count, - GFP_NOIO, &md->queue->bio_split); - ci.io->orig_bio = b; - - bio_chain(b, bio); - trace_block_split(b, bio->bi_iter.bi_sector); + orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count, + GFP_NOIO, &md->queue->bio_split); + bio_chain(orig_bio, bio); + trace_block_split(orig_bio, bio->bi_iter.bi_sector); submit_bio_noacct(bio); out: - dm_start_io_acct(ci.io); + if (!orig_bio) + orig_bio = bio; + smp_store_release(&ci.io->orig_bio, orig_bio); + if (ci.io->start_io_acct) + dm_start_io_acct(ci.io, NULL); + /* drop the extra reference count */ dm_io_dec_pending(ci.io, errno_to_blk_status(error)); } diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index b26fecf6c8e8..7752d14d13f8 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -362,6 +362,12 @@ struct dm_target { * zone append operations using regular writes. */ bool emulate_zone_append:1; + + /* + * Set if the target will submit IO using dm_submit_bio_remap() + * after returning DM_MAPIO_SUBMITTED from its map function. + */ + bool accounts_remapped_io:1; }; void *dm_per_bio_data(struct bio *bio, size_t data_size); @@ -465,6 +471,7 @@ int dm_suspended(struct dm_target *ti); int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); +void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone, bool from_wq); union map_info *dm_get_rq_mapinfo(struct request *rq); #ifdef CONFIG_BLK_DEV_ZONED diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index c12ce30b52df..2e9550fef90f 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -286,9 +286,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 45 +#define DM_VERSION_MINOR 46 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2021-03-22)" +#define DM_VERSION_EXTRA "-ioctl (2022-02-22)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ -- cgit v1.2.3-71-gd317 From a8b9d116cda047f38ba29ce26df49c57479ffaa2 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Wed, 26 Jan 2022 05:18:26 -0800 Subject: dm: cleanup double word in comment Remove the second 'a'. 
Signed-off-by: Tom Rix Signed-off-by: Mike Snitzer --- include/linux/device-mapper.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 7752d14d13f8..70cd9449275a 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -358,7 +358,7 @@ struct dm_target { bool limit_swap_bios:1; /* - * Set if this target implements a a zoned device and needs emulation of + * Set if this target implements a zoned device and needs emulation of * zone append operations using regular writes. */ bool emulate_zone_append:1; -- cgit v1.2.3-71-gd317 From e0891269a8c25715bd9510dc355326b00ab42db2 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 16 Feb 2022 16:22:26 +0000 Subject: linkage: add SYM_FUNC_ALIAS{,_LOCAL,_WEAK}() Currently aliasing an asm function requires adding START and END annotations for each name, as per Documentation/asm-annotations.rst: SYM_FUNC_START_ALIAS(__memset) SYM_FUNC_START(memset) ... asm insns ... SYM_FUNC_END(memset) SYM_FUNC_END_ALIAS(__memset) This is more painful than necessary to maintain, especially where a function has many aliases, some of which we may wish to define conditionally. For example, arm64's memcpy/memmove implementation (which uses some arch-specific SYM_*() helpers) has: SYM_FUNC_START_ALIAS(__memmove) SYM_FUNC_START_ALIAS_WEAK_PI(memmove) SYM_FUNC_START_ALIAS(__memcpy) SYM_FUNC_START_WEAK_PI(memcpy) ... asm insns ... SYM_FUNC_END_PI(memcpy) EXPORT_SYMBOL(memcpy) SYM_FUNC_END_ALIAS(__memcpy) EXPORT_SYMBOL(__memcpy) SYM_FUNC_END_ALIAS_PI(memmove) EXPORT_SYMBOL(memmove) SYM_FUNC_END_ALIAS(__memmove) EXPORT_SYMBOL(__memmove) It would be much nicer if we could define the aliases *after* the standard function definition. This would avoid the need to specify each symbol name twice, and would make it easier to spot the canonical function definition. This patch adds new macros to allow us to do so, which allows the above example to be rewritten more succinctly as: SYM_FUNC_START(__pi_memcpy) ... asm insns ... SYM_FUNC_END(__pi_memcpy) SYM_FUNC_ALIAS(__memcpy, __pi_memcpy) EXPORT_SYMBOL(__memcpy) SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy) EXPORT_SYMBOL(memcpy) SYM_FUNC_ALIAS(__pi_memmove, __pi_memcpy) SYM_FUNC_ALIAS(__memmove, __pi_memmove) EXPORT_SYMBOL(__memmove) SYM_FUNC_ALIAS_WEAK(memmove, __memmove) EXPORT_SYMBOL(memmove) The reduction in duplication will also make it possible to replace some uses of WEAK with more accurate Kconfig guards, e.g. #ifndef CONFIG_KASAN SYM_FUNC_ALIAS(memmove, __memmove) EXPORT_SYMBOL(memmove) #endif ... which should make it easier to ensure that symbols are neither used nor overridden unexpectedly. The existing SYM_FUNC_START_ALIAS() and SYM_FUNC_START_LOCAL_ALIAS() are marked as deprecated, and will be removed once existing users are moved over to the new scheme. The tools/perf/ copy of linkage.h is updated to match. A subsequent patch will depend upon this when updating the x86 asm annotations.
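For reference, chasing the macros through: SYM_FUNC_ALIAS(__memcpy, __pi_memcpy) is SYM_ALIAS(__memcpy, __pi_memcpy, SYM_T_FUNC, SYM_L_GLOBAL), which should expand to roughly the following directives, using the SYM_ALIAS() helper added below (a sketch assuming SYM_L_GLOBAL emits .globl and SYM_T_FUNC is STT_FUNC, per linkage.h):

	.globl __memcpy
	.set __memcpy, __pi_memcpy
	.type __memcpy STT_FUNC
	.set .L__sym_size___memcpy, .L__sym_size___pi_memcpy
	.size __memcpy, .L__sym_size___memcpy

Note that the alias copies its size from the .L__sym_size_ symbol that SYM_END() now records, which is why an alias must appear after the SYM_FUNC_END() of the function it aliases.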
Signed-off-by: Mark Rutland Acked-by: Ard Biesheuvel Acked-by: Josh Poimboeuf Acked-by: Mark Brown Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Jiri Slaby Cc: Peter Zijlstra Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20220216162229.1076788-2-mark.rutland@arm.com Signed-off-by: Will Deacon --- Documentation/asm-annotations.rst | 16 ++++++++++++-- include/linux/linkage.h | 37 ++++++++++++++++++++++++++++++++- tools/perf/util/include/linux/linkage.h | 35 +++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst index f4bf0f6395fb..4868b58c60fb 100644 --- a/Documentation/asm-annotations.rst +++ b/Documentation/asm-annotations.rst @@ -130,8 +130,20 @@ denoting a range of code via ``SYM_*_START/END`` annotations. In fact, this kind of annotation corresponds to the now deprecated ``ENTRY`` and ``ENDPROC`` macros. -* ``SYM_FUNC_START_ALIAS`` and ``SYM_FUNC_START_LOCAL_ALIAS`` serve for those - who decided to have two or more names for one function. The typical use is:: +* ``SYM_FUNC_ALIAS``, ``SYM_FUNC_ALIAS_LOCAL``, and ``SYM_FUNC_ALIAS_WEAK`` can + be used to define multiple names for a function. The typical use is:: + + SYM_FUNC_START(__memset) + ... asm insns ... + SYM_FUNC_END(__memset) + SYM_FUNC_ALIAS(memset, __memset) + + In this example, one can call ``__memset`` or ``memset`` with the same + result, except the debug information for the instructions is generated to + the object file only once -- for the non-``ALIAS`` case. + +* ``SYM_FUNC_START_ALIAS`` and ``SYM_FUNC_START_LOCAL_ALIAS`` are deprecated + ways to define two or more names for one function. The typical use is:: SYM_FUNC_START_ALIAS(__memset) SYM_FUNC_START(memset) diff --git a/include/linux/linkage.h b/include/linux/linkage.h index dbf8506decca..e574a84d8b11 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -165,7 +165,18 @@ #ifndef SYM_END #define SYM_END(name, sym_type) \ .type name sym_type ASM_NL \ - .size name, .-name + .set .L__sym_size_##name, .-name ASM_NL \ + .size name, .L__sym_size_##name #endif + +/* SYM_ALIAS -- use only if you have to */ +#ifndef SYM_ALIAS +#define SYM_ALIAS(alias, name, sym_type, linkage) \ + linkage(alias) ASM_NL \ + .set alias, name ASM_NL \ + .type alias sym_type ASM_NL \ + .set .L__sym_size_##alias, .L__sym_size_##name ASM_NL \ + .size alias, .L__sym_size_##alias #endif /* === code annotations === */ @@ -275,6 +286,30 @@ SYM_END(name, SYM_T_FUNC) #endif +/* + * SYM_FUNC_ALIAS -- define a global alias for an existing function + */ +#ifndef SYM_FUNC_ALIAS +#define SYM_FUNC_ALIAS(alias, name) \ + SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_GLOBAL) +#endif + +/* + * SYM_FUNC_ALIAS_LOCAL -- define a local alias for an existing function + */ +#ifndef SYM_FUNC_ALIAS_LOCAL +#define SYM_FUNC_ALIAS_LOCAL(alias, name) \ + SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_LOCAL) +#endif + +/* + * SYM_FUNC_ALIAS_WEAK -- define a weak global alias for an existing function + */ +#ifndef SYM_FUNC_ALIAS_WEAK +#define SYM_FUNC_ALIAS_WEAK(alias, name) \ + SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK) +#endif + /* SYM_CODE_START -- use for non-C (special) functions */ #ifndef SYM_CODE_START #define SYM_CODE_START(name) \ diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h index 5acf053fca7d..7b4cd7947e3f 100644 --- a/tools/perf/util/include/linux/linkage.h +++
b/tools/perf/util/include/linux/linkage.h @@ -50,9 +50,20 @@ #ifndef SYM_END #define SYM_END(name, sym_type) \ .type name sym_type ASM_NL \ + .set .L__sym_size_##name, .-name ASM_NL \ .size name, .-name #endif +/* SYM_ALIAS -- use only if you have to */ +#ifndef SYM_ALIAS +#define SYM_ALIAS(alias, name, sym_type, linkage) \ + linkage(alias) ASM_NL \ + .set alias, name ASM_NL \ + .type alias sym_type ASM_NL \ + .set .L__sym_size_##alias, .L__sym_size_##name ASM_NL \ + .size alias, .L__sym_size_##alias +#endif + /* * SYM_FUNC_START_ALIAS -- use where there are two global names for one * function @@ -101,4 +112,28 @@ SYM_END(name, SYM_T_FUNC) #endif +/* + * SYM_FUNC_ALIAS -- define a global alias for an existing function + */ +#ifndef SYM_FUNC_ALIAS +#define SYM_FUNC_ALIAS(alias, name) \ + SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_GLOBAL) +#endif + +/* + * SYM_FUNC_ALIAS_LOCAL -- define a local alias for an existing function + */ +#ifndef SYM_FUNC_ALIAS_LOCAL +#define SYM_FUNC_ALIAS_LOCAL(alias, name) \ + SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_LOCAL) +#endif + +/* + * SYM_FUNC_ALIAS_WEAK -- define a weak global alias for an existing function + */ +#ifndef SYM_FUNC_ALIAS_WEAK +#define SYM_FUNC_ALIAS_WEAK(alias, name) \ + SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK) +#endif + #endif /* PERF_LINUX_LINKAGE_H_ */ -- cgit v1.2.3-71-gd317 From be9aea74400433e03c2a8b0260fc9ffe2495f698 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 16 Feb 2022 16:22:29 +0000 Subject: linkage: remove SYM_FUNC_{START,END}_ALIAS() Now that all aliases are defined using SYM_FUNC_ALIAS(), remove the old SYM_FUNC_{START,END}_ALIAS() macros. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Acked-by: Ard Biesheuvel Acked-by: Josh Poimboeuf Acked-by: Mark Brown Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Jiri Slaby Cc: Peter Zijlstra Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20220216162229.1076788-5-mark.rutland@arm.com Signed-off-by: Will Deacon --- Documentation/asm-annotations.rst | 13 ------------- include/linux/linkage.h | 30 ------------------------------ tools/perf/util/include/linux/linkage.h | 21 --------------------- 3 files changed, 64 deletions(-) (limited to 'include/linux') diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst index 4868b58c60fb..a64f2ca469d4 100644 --- a/Documentation/asm-annotations.rst +++ b/Documentation/asm-annotations.rst @@ -142,19 +142,6 @@ denoting a range of code via ``SYM_*_START/END`` annotations. result, except the debug information for the instructions is generated to the object file only once -- for the non-``ALIAS`` case. -* ``SYM_FUNC_START_ALIAS`` and ``SYM_FUNC_START_LOCAL_ALIAS`` are deprecated - ways to define two or more names for one function. The typical use is:: - - SYM_FUNC_START_ALIAS(__memset) - SYM_FUNC_START(memset) - ... asm insns ... - SYM_FUNC_END(memset) - SYM_FUNC_END_ALIAS(__memset) - - In this example, one can call ``__memset`` or ``memset`` with the same - result, except the debug information for the instructions is generated to - the object file only once -- for the non-``ALIAS`` case. - * ``SYM_CODE_START`` and ``SYM_CODE_START_LOCAL`` should be used only in special cases -- if you know what you are doing. 
This is used exclusively for interrupt handlers and similar where the calling convention is not the C diff --git a/include/linux/linkage.h b/include/linux/linkage.h index e574a84d8b11..acb1ad2356f1 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -211,30 +211,8 @@ SYM_ENTRY(name, linkage, SYM_A_NONE) #endif -/* - * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one - * function - */ -#ifndef SYM_FUNC_START_LOCAL_ALIAS -#define SYM_FUNC_START_LOCAL_ALIAS(name) \ - SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) -#endif - -/* - * SYM_FUNC_START_ALIAS -- use where there are two global names for one - * function - */ -#ifndef SYM_FUNC_START_ALIAS -#define SYM_FUNC_START_ALIAS(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) -#endif - /* SYM_FUNC_START -- use for global functions */ #ifndef SYM_FUNC_START -/* - * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two - * later. - */ #define SYM_FUNC_START(name) \ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) #endif @@ -247,7 +225,6 @@ /* SYM_FUNC_START_LOCAL -- use for local functions */ #ifndef SYM_FUNC_START_LOCAL -/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */ #define SYM_FUNC_START_LOCAL(name) \ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) #endif @@ -270,18 +247,11 @@ SYM_START(name, SYM_L_WEAK, SYM_A_NONE) #endif -/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */ -#ifndef SYM_FUNC_END_ALIAS -#define SYM_FUNC_END_ALIAS(name) \ - SYM_END(name, SYM_T_FUNC) -#endif - /* * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START, * SYM_FUNC_START_WEAK, ... */ #ifndef SYM_FUNC_END -/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */ #define SYM_FUNC_END(name) \ SYM_END(name, SYM_T_FUNC) #endif diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h index 7b4cd7947e3f..aa0c5179836d 100644 --- a/tools/perf/util/include/linux/linkage.h +++ b/tools/perf/util/include/linux/linkage.h @@ -64,38 +64,18 @@ .size alias, .L__sym_size_##alias #endif -/* - * SYM_FUNC_START_ALIAS -- use where there are two global names for one - * function - */ -#ifndef SYM_FUNC_START_ALIAS -#define SYM_FUNC_START_ALIAS(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) -#endif - /* SYM_FUNC_START -- use for global functions */ #ifndef SYM_FUNC_START -/* - * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two - * later. - */ #define SYM_FUNC_START(name) \ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) #endif /* SYM_FUNC_START_LOCAL -- use for local functions */ #ifndef SYM_FUNC_START_LOCAL -/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */ #define SYM_FUNC_START_LOCAL(name) \ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) #endif -/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */ -#ifndef SYM_FUNC_END_ALIAS -#define SYM_FUNC_END_ALIAS(name) \ - SYM_END(name, SYM_T_FUNC) -#endif - /* SYM_FUNC_START_WEAK -- use for weak functions */ #ifndef SYM_FUNC_START_WEAK #define SYM_FUNC_START_WEAK(name) \ @@ -107,7 +87,6 @@ * SYM_FUNC_START_WEAK, ... 
*/ #ifndef SYM_FUNC_END -/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */ #define SYM_FUNC_END(name) \ SYM_END(name, SYM_T_FUNC) #endif -- cgit v1.2.3-71-gd317 From 6e031ec0e5a2dda53e12e0d2a7e9b15b47a3c502 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 24 Jan 2022 16:11:37 -0700 Subject: vfio/pci: Stub vfio_pci_vga_rw when !CONFIG_VFIO_PCI_VGA Resolve build errors reported against UML build for undefined ioport_map() and ioport_unmap() functions. Without this config option a device cannot have vfio_pci_core_device.has_vga set, so the existing function would always return -EINVAL anyway. Reported-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20220123125737.2658758-1-geert@linux-m68k.org Link: https://lore.kernel.org/r/164306582968.3758255.15192949639574660648.stgit@omen Signed-off-by: Alex Williamson --- drivers/vfio/pci/vfio_pci_rdwr.c | 2 ++ include/linux/vfio_pci_core.h | 9 +++++++++ 2 files changed, 11 insertions(+) (limited to 'include/linux') diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 57d3b2cbbd8e..82ac1569deb0 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -288,6 +288,7 @@ out: return done; } +#ifdef CONFIG_VFIO_PCI_VGA ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, size_t count, loff_t *ppos, bool iswrite) { @@ -355,6 +356,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, return done; } +#endif static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd, bool test_mem) diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index ef9a44b6cf5d..ae6f4838ab75 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -159,8 +159,17 @@ extern ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, extern ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, size_t count, loff_t *ppos, bool iswrite); +#ifdef CONFIG_VFIO_PCI_VGA extern ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, size_t count, loff_t *ppos, bool iswrite); +#else +static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, + char __user *buf, size_t count, + loff_t *ppos, bool iswrite) +{ + return -EINVAL; +} +#endif extern long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, uint64_t data, int count, int fd); -- cgit v1.2.3-71-gd317 From 1a03d3f13ffe5dd24142d6db629e72c11b704d99 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 17 Feb 2022 11:24:04 +0100 Subject: fork: Move task stack accounting to do_exit() There is no need to perform the stack accounting of the outgoing task in its final schedule() invocation which happens with preemption disabled. The task is leaving, the resources will be freed and the accounting can happen in do_exit() before the actual schedule invocation which frees the stack memory. Move the accounting of the stack memory from release_task_stack() to exit_task_stack_account() which then can be invoked from do_exit(). 
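The resulting exit path, as a simplified sketch (details elided; the relevant calls match the kernel/exit.c hunk below):

	void __noreturn do_exit(long code)
	{
		struct task_struct *tsk = current;

		/* ... */
		validate_creds_for_do_exit(tsk);
		exit_task_stack_account(tsk);	/* account -1, uncharge memcg pages */
		check_stack_usage();
		preempt_disable();
		/* ... final schedule() via do_task_dead(); release_task_stack()
		 * afterwards only frees the stack, with no accounting left to do. */
		do_task_dead();
	}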
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Acked-by: Andy Lutomirski Link: https://lore.kernel.org/r/20220217102406.3697941-7-bigeasy@linutronix.de --- include/linux/sched/task_stack.h | 2 ++ kernel/exit.c | 1 + kernel/fork.c | 35 +++++++++++++++++++++++------------ 3 files changed, 26 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index d10150587d81..892562ebbd3a 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -79,6 +79,8 @@ static inline void *try_get_task_stack(struct task_struct *tsk) static inline void put_task_stack(struct task_struct *tsk) {} #endif +void exit_task_stack_account(struct task_struct *tsk); + #define task_stack_end_corrupted(task) \ (*(end_of_stack(task)) != STACK_END_MAGIC) diff --git a/kernel/exit.c b/kernel/exit.c index b00a25bb4ab9..c303cffe7fdb 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -845,6 +845,7 @@ void __noreturn do_exit(long code) put_page(tsk->task_frag.page); validate_creds_for_do_exit(tsk); + exit_task_stack_account(tsk); check_stack_usage(); preempt_disable(); diff --git a/kernel/fork.c b/kernel/fork.c index ac63e7fa8816..25828127db8d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -211,9 +211,8 @@ static int free_vm_stack_cache(unsigned int cpu) return 0; } -static int memcg_charge_kernel_stack(struct task_struct *tsk) +static int memcg_charge_kernel_stack(struct vm_struct *vm) { - struct vm_struct *vm = task_stack_vm_area(tsk); int i; int ret; @@ -239,6 +238,7 @@ err: static int alloc_thread_stack_node(struct task_struct *tsk, int node) { + struct vm_struct *vm; void *stack; int i; @@ -256,7 +256,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node) /* Clear stale pointers from reused stack. */ memset(s->addr, 0, THREAD_SIZE); - if (memcg_charge_kernel_stack(tsk)) { + if (memcg_charge_kernel_stack(s)) { vfree(s->addr); return -ENOMEM; } @@ -279,7 +279,8 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node) if (!stack) return -ENOMEM; - if (memcg_charge_kernel_stack(tsk)) { + vm = find_vm_area(stack); + if (memcg_charge_kernel_stack(vm)) { vfree(stack); return -ENOMEM; } @@ -288,19 +289,15 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node) * free_thread_stack() can be called in interrupt context, * so cache the vm_struct. 
*/ - tsk->stack_vm_area = find_vm_area(stack); + tsk->stack_vm_area = vm; tsk->stack = stack; return 0; } static void free_thread_stack(struct task_struct *tsk) { - struct vm_struct *vm = task_stack_vm_area(tsk); int i; - for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) - memcg_kmem_uncharge_page(vm->pages[i], 0); - for (i = 0; i < NR_CACHED_STACKS; i++) { if (this_cpu_cmpxchg(cached_stacks[i], NULL, tsk->stack_vm_area) != NULL) @@ -454,12 +451,25 @@ static void account_kernel_stack(struct task_struct *tsk, int account) } } +void exit_task_stack_account(struct task_struct *tsk) +{ + account_kernel_stack(tsk, -1); + + if (IS_ENABLED(CONFIG_VMAP_STACK)) { + struct vm_struct *vm; + int i; + + vm = task_stack_vm_area(tsk); + for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) + memcg_kmem_uncharge_page(vm->pages[i], 0); + } +} + static void release_task_stack(struct task_struct *tsk) { if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD)) return; /* Better to leak the stack than to free prematurely */ - account_kernel_stack(tsk, -1); free_thread_stack(tsk); } @@ -918,6 +928,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) #ifdef CONFIG_THREAD_INFO_IN_TASK refcount_set(&tsk->stack_refcount, 1); #endif + account_kernel_stack(tsk, 1); err = scs_prepare(tsk, node); if (err) @@ -961,8 +972,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->wake_q.next = NULL; tsk->worker_private = NULL; - account_kernel_stack(tsk, 1); - kcov_task_init(tsk); kmap_local_fork(tsk); @@ -981,6 +990,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) return tsk; free_stack: + exit_task_stack_account(tsk); free_thread_stack(tsk); free_tsk: free_task_struct(tsk); @@ -2459,6 +2469,7 @@ bad_fork_cleanup_count: exit_creds(p); bad_fork_free: WRITE_ONCE(p->__state, TASK_DEAD); + exit_task_stack_account(p); put_task_stack(p); delayed_free_task(p); fork_out: -- cgit v1.2.3-71-gd317 From f9b5e46f4097eb298f68e5b02f70697a90a44739 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Fri, 18 Feb 2022 17:29:44 -0800 Subject: kasan: split kasan_*enabled() functions into a separate header In an upcoming commit we are going to need to call kasan_hw_tags_enabled() from arch/arm64/include/asm/mte.h. This would create a circular dependency between headers if KASAN_GENERIC or KASAN_SW_TAGS is enabled: linux/kasan.h -> linux/pgtable.h -> asm/pgtable.h -> asm/mte.h -> linux/kasan.h. Break the cycle by introducing a new header linux/kasan-enabled.h with the kasan_*enabled() functions that can be included from asm/mte.h. 
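As an illustration of the intended use (the helper below is hypothetical, not part of this patch), a low-level header such as asm/mte.h can now test for HW tag-based KASAN without the include cycle:

	#include <linux/kasan-enabled.h>

	static inline bool example_mte_checks_enabled(void)
	{
		/*
		 * With CONFIG_KASAN_HW_TAGS this is a static branch;
		 * otherwise it constant-folds to false.
		 */
		return kasan_hw_tags_enabled();
	}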
Link: https://linux-review.googlesource.com/id/I5b0d96c6ed0026fc790899e14d42b2fac6ab568e Signed-off-by: Peter Collingbourne Reviewed-by: Andrey Konovalov Link: https://lore.kernel.org/r/20220219012945.894950-1-pcc@google.com Signed-off-by: Will Deacon --- include/linux/kasan-enabled.h | 33 +++++++++++++++++++++++++++++++++ include/linux/kasan.h | 23 +---------------------- 2 files changed, 34 insertions(+), 22 deletions(-) create mode 100644 include/linux/kasan-enabled.h (limited to 'include/linux') diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h new file mode 100644 index 000000000000..4b6615375022 --- /dev/null +++ b/include/linux/kasan-enabled.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KASAN_ENABLED_H +#define _LINUX_KASAN_ENABLED_H + +#ifdef CONFIG_KASAN_HW_TAGS + +DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); + +static __always_inline bool kasan_enabled(void) +{ + return static_branch_likely(&kasan_flag_enabled); +} + +static inline bool kasan_hw_tags_enabled(void) +{ + return kasan_enabled(); +} + +#else /* CONFIG_KASAN_HW_TAGS */ + +static inline bool kasan_enabled(void) +{ + return IS_ENABLED(CONFIG_KASAN); +} + +static inline bool kasan_hw_tags_enabled(void) +{ + return false; +} + +#endif /* CONFIG_KASAN_HW_TAGS */ + +#endif /* LINUX_KASAN_ENABLED_H */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 4a45562d8893..b6a93261c92a 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -3,6 +3,7 @@ #define _LINUX_KASAN_H #include +#include #include #include #include @@ -83,33 +84,11 @@ static inline void kasan_disable_current(void) {} #ifdef CONFIG_KASAN_HW_TAGS -DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); - -static __always_inline bool kasan_enabled(void) -{ - return static_branch_likely(&kasan_flag_enabled); -} - -static inline bool kasan_hw_tags_enabled(void) -{ - return kasan_enabled(); -} - void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags); void kasan_free_pages(struct page *page, unsigned int order); #else /* CONFIG_KASAN_HW_TAGS */ -static inline bool kasan_enabled(void) -{ - return IS_ENABLED(CONFIG_KASAN); -} - -static inline bool kasan_hw_tags_enabled(void) -{ - return false; -} - static __always_inline void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags) { -- cgit v1.2.3-71-gd317 From a773187e37fa5c3bcd92ffda9085707df34f903a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Feb 2022 09:28:27 +0100 Subject: scsi: dm: Remove WRITE_SAME support There are no more end-users of REQ_OP_WRITE_SAME left, so we can start deleting it. Link: https://lore.kernel.org/r/20220209082828.2629273-7-hch@lst.de Reviewed-by: Mike Snitzer Signed-off-by: Christoph Hellwig Signed-off-by: Martin K. 
Petersen --- drivers/md/dm-core.h | 1 - drivers/md/dm-crypt.c | 1 - drivers/md/dm-ebs-target.c | 1 - drivers/md/dm-io.c | 22 ++-------------------- drivers/md/dm-linear.c | 1 - drivers/md/dm-mpath.c | 1 - drivers/md/dm-rq.c | 3 --- drivers/md/dm-stripe.c | 4 +--- drivers/md/dm-table.c | 29 ----------------------------- drivers/md/dm-zone.c | 4 ---- drivers/md/dm.c | 15 --------------- include/linux/device-mapper.h | 6 ------ 12 files changed, 3 insertions(+), 85 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index b855fef4f38a..219407a7b77e 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -141,7 +141,6 @@ struct mapped_device { #define DMF_EMULATE_ZONE_APPEND 9 void disable_discard(struct mapped_device *md); -void disable_write_same(struct mapped_device *md); void disable_write_zeroes(struct mapped_device *md); static inline sector_t dm_get_size(struct mapped_device *md) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index d4ae31558826..4727b72073fe 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2006,7 +2006,6 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc, */ switch (bio_op(ctx->bio_in)) { case REQ_OP_WRITE: - case REQ_OP_WRITE_SAME: case REQ_OP_WRITE_ZEROES: return true; default: diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c index 7ce5d509b940..0221fa63f888 100644 --- a/drivers/md/dm-ebs-target.c +++ b/drivers/md/dm-ebs-target.c @@ -335,7 +335,6 @@ static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->num_secure_erase_bios = 0; - ti->num_write_same_bios = 0; ti->num_write_zeroes_bios = 0; return 0; bad: diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2d3cda0acacb..82d431677bb6 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -304,7 +304,6 @@ static void do_region(int op, int op_flags, unsigned region, unsigned num_bvecs; sector_t remaining = where->count; struct request_queue *q = bdev_get_queue(where->bdev); - unsigned short logical_block_size = queue_logical_block_size(q); sector_t num_sectors; unsigned int special_cmd_max_sectors; @@ -315,10 +314,8 @@ static void do_region(int op, int op_flags, unsigned region, special_cmd_max_sectors = q->limits.max_discard_sectors; else if (op == REQ_OP_WRITE_ZEROES) special_cmd_max_sectors = q->limits.max_write_zeroes_sectors; - else if (op == REQ_OP_WRITE_SAME) - special_cmd_max_sectors = q->limits.max_write_same_sectors; - if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || - op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) { + if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) && + special_cmd_max_sectors == 0) { atomic_inc(&io->count); dec_count(io, region, BLK_STS_NOTSUPP); return; @@ -337,9 +334,6 @@ static void do_region(int op, int op_flags, unsigned region, case REQ_OP_WRITE_ZEROES: num_bvecs = 0; break; - case REQ_OP_WRITE_SAME: - num_bvecs = 1; - break; default: num_bvecs = bio_max_segs(dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); @@ -356,18 +350,6 @@ static void do_region(int op, int op_flags, unsigned region, num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; remaining -= num_sectors; - } else if (op == REQ_OP_WRITE_SAME) { - /* - * WRITE SAME only uses a single page. 
- */ - dp->get_page(dp, &page, &len, &offset); - bio_add_page(bio, page, logical_block_size, offset); - num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); - bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; - - offset = 0; - remaining -= num_sectors; - dp->next_page(dp); } else while (remaining) { /* * Try and add as many pages as possible. diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 1b97a11d7151..76b486e4d2be 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -60,7 +60,6 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->num_secure_erase_bios = 1; - ti->num_write_same_bios = 1; ti->num_write_zeroes_bios = 1; ti->private = lc; return 0; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index f4719b65e5e3..9ab971834a53 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1252,7 +1252,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->num_flush_bios = 1; ti->num_discard_bios = 1; - ti->num_write_same_bios = 1; ti->num_write_zeroes_bios = 1; if (m->queue_mode == DM_TYPE_BIO_BASED) ti->per_io_data_size = multipath_per_bio_data_size(); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 579ab6183d4d..3907950a0ddc 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -217,9 +217,6 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) if (req_op(clone) == REQ_OP_DISCARD && !clone->q->limits.max_discard_sectors) disable_discard(tio->md); - else if (req_op(clone) == REQ_OP_WRITE_SAME && - !clone->q->limits.max_write_same_sectors) - disable_write_same(tio->md); else if (req_op(clone) == REQ_OP_WRITE_ZEROES && !clone->q->limits.max_write_zeroes_sectors) disable_write_zeroes(tio->md); diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index e566115ec0bb..c81d331d1afe 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -157,7 +157,6 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->num_flush_bios = stripes; ti->num_discard_bios = stripes; ti->num_secure_erase_bios = stripes; - ti->num_write_same_bios = stripes; ti->num_write_zeroes_bios = stripes; sc->chunk_size = chunk_size; @@ -284,8 +283,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) } if (unlikely(bio_op(bio) == REQ_OP_DISCARD) || unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) || - unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) || - unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) { + unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) { target_bio_nr = dm_bio_get_target_bio_nr(bio); BUG_ON(target_bio_nr >= sc->stripes); return stripe_map_range(sc, bio, target_bio_nr); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e43096cfe9e2..88ab98637934 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1822,33 +1822,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, return !blk_queue_add_random(q); } -static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - - return !q->limits.max_write_same_sectors; -} - -static bool dm_table_supports_write_same(struct dm_table *t) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (!ti->num_write_same_bios) - return false; - - if (!ti->type->iterate_devices 
|| - ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) - return false; - } - - return true; -} - static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -2027,8 +2000,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, else blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - if (!dm_table_supports_write_same(t)) - q->limits.max_write_same_sectors = 0; if (!dm_table_supports_write_zeroes(t)) q->limits.max_write_zeroes_sectors = 0; diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 6d82a34438c8..c1ca9be4b79e 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -130,7 +130,6 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) switch (bio_op(bio)) { case REQ_OP_WRITE_ZEROES: - case REQ_OP_WRITE_SAME: case REQ_OP_WRITE: return !op_is_flush(bio->bi_opf) && bio_sectors(bio); default: @@ -390,7 +389,6 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md, case REQ_OP_ZONE_FINISH: return true; case REQ_OP_WRITE_ZEROES: - case REQ_OP_WRITE_SAME: case REQ_OP_WRITE: /* Writes must be aligned to the zone write pointer */ if ((clone->bi_iter.bi_sector & (zsectors - 1)) != zwp_offset) @@ -446,7 +444,6 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, blk_queue_zone_sectors(md->queue)); return BLK_STS_OK; case REQ_OP_WRITE_ZEROES: - case REQ_OP_WRITE_SAME: case REQ_OP_WRITE: WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors); return BLK_STS_OK; @@ -503,7 +500,6 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio) return false; switch (bio_op(orig_bio)) { case REQ_OP_WRITE_ZEROES: - case REQ_OP_WRITE_SAME: case REQ_OP_WRITE: case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_FINISH: diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c0ae8087c602..79e273924afd 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -855,14 +855,6 @@ void disable_discard(struct mapped_device *md) blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); } -void disable_write_same(struct mapped_device *md) -{ - struct queue_limits *limits = dm_get_queue_limits(md); - - /* device doesn't really support WRITE SAME, disable it */ - limits->max_write_same_sectors = 0; -} - void disable_write_zeroes(struct mapped_device *md) { struct queue_limits *limits = dm_get_queue_limits(md); @@ -889,9 +881,6 @@ static void clone_endio(struct bio *bio) if (bio_op(bio) == REQ_OP_DISCARD && !q->limits.max_discard_sectors) disable_discard(md); - else if (bio_op(bio) == REQ_OP_WRITE_SAME && - !q->limits.max_write_same_sectors) - disable_write_same(md); else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && !q->limits.max_write_zeroes_sectors) disable_write_zeroes(md); @@ -1370,7 +1359,6 @@ static bool is_abnormal_io(struct bio *bio) switch (bio_op(bio)) { case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: - case REQ_OP_WRITE_SAME: case REQ_OP_WRITE_ZEROES: r = true; break; @@ -1392,9 +1380,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, case REQ_OP_SECURE_ERASE: num_bios = ti->num_secure_erase_bios; break; - case REQ_OP_WRITE_SAME: - num_bios = ti->num_write_same_bios; - break; case REQ_OP_WRITE_ZEROES: num_bios = ti->num_write_zeroes_bios; break; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index b26fecf6c8e8..721db99f4f63 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -316,12 +316,6 @@ struct dm_target { */ unsigned num_secure_erase_bios; - /* - * The number of WRITE SAME bios 
that will be submitted to the target. - * The bio number can be accessed with dm_bio_get_target_bio_nr. - */ - unsigned num_write_same_bios; - /* * The number of WRITE ZEROES bios that will be submitted to the target. * The bio number can be accessed with dm_bio_get_target_bio_nr. -- cgit v1.2.3-71-gd317 From 73bd66d9c834220579c881a3eb020fd8917075d8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Feb 2022 09:28:28 +0100 Subject: scsi: block: Remove REQ_OP_WRITE_SAME support No more users of REQ_OP_WRITE_SAME or drivers implementing it are left, so remove the infrastructure. [mkp: fold in and tweak sysfs reporting fix] Link: https://lore.kernel.org/r/20220209082828.2629273-8-hch@lst.de Reviewed-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig Signed-off-by: Martin K. Petersen --- block/blk-core.c | 13 +------ block/blk-lib.c | 93 ----------------------------------------------- block/blk-merge.c | 40 -------------------- block/blk-settings.c | 16 -------- block/blk-sysfs.c | 3 +- block/blk-zoned.c | 1 - block/blk.h | 1 - block/bounce.c | 3 -- include/linux/bio.h | 3 -- include/linux/blk_types.h | 2 - include/linux/blkdev.h | 19 ---------- kernel/trace/blktrace.c | 1 - 12 files changed, 2 insertions(+), 193 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index 97f8bc8d3a79..535dbaa78ffe 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -122,7 +122,6 @@ static const char *const blk_op_name[] = { REQ_OP_NAME(ZONE_CLOSE), REQ_OP_NAME(ZONE_FINISH), REQ_OP_NAME(ZONE_APPEND), - REQ_OP_NAME(WRITE_SAME), REQ_OP_NAME(WRITE_ZEROES), REQ_OP_NAME(DRV_IN), REQ_OP_NAME(DRV_OUT), @@ -734,10 +733,6 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio) if (!blk_queue_secure_erase(q)) goto not_supported; break; - case REQ_OP_WRITE_SAME: - if (!q->limits.max_write_same_sectors) - goto not_supported; - break; case REQ_OP_ZONE_APPEND: status = blk_check_zone_append(q, bio); if (status != BLK_STS_OK) @@ -933,13 +928,7 @@ void submit_bio(struct bio *bio) * go through the normal accounting stuff before submission. */ if (bio_has_data(bio)) { - unsigned int count; - - if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) - count = queue_logical_block_size( - bdev_get_queue(bio->bi_bdev)) >> 9; - else - count = bio_sectors(bio); + unsigned int count = bio_sectors(bio); if (op_is_write(bio_op(bio))) { count_vm_events(PGPGOUT, count); diff --git a/block/blk-lib.c b/block/blk-lib.c index 9f09beadcbe3..bf5254ccdb5f 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -151,99 +151,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, } EXPORT_SYMBOL(blkdev_issue_discard); -/** - * __blkdev_issue_write_same - generate number of bios with same page - * @bdev: target blockdev - * @sector: start sector - * @nr_sects: number of sectors to write - * @gfp_mask: memory allocation flags (for bio_alloc) - * @page: page containing data to write - * @biop: pointer to anchor bio - * - * Description: - * Generate and issue number of bios(REQ_OP_WRITE_SAME) with same page. 
- */ -static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask, struct page *page, - struct bio **biop) -{ - struct request_queue *q = bdev_get_queue(bdev); - unsigned int max_write_same_sectors; - struct bio *bio = *biop; - sector_t bs_mask; - - if (!q) - return -ENXIO; - - if (bdev_read_only(bdev)) - return -EPERM; - - bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; - if ((sector | nr_sects) & bs_mask) - return -EINVAL; - - if (!bdev_write_same(bdev)) - return -EOPNOTSUPP; - - /* Ensure that max_write_same_sectors doesn't overflow bi_size */ - max_write_same_sectors = bio_allowed_max_sectors(q); - - while (nr_sects) { - bio = blk_next_bio(bio, 1, gfp_mask); - bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, bdev); - bio->bi_vcnt = 1; - bio->bi_io_vec->bv_page = page; - bio->bi_io_vec->bv_offset = 0; - bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); - bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0); - - if (nr_sects > max_write_same_sectors) { - bio->bi_iter.bi_size = max_write_same_sectors << 9; - nr_sects -= max_write_same_sectors; - sector += max_write_same_sectors; - } else { - bio->bi_iter.bi_size = nr_sects << 9; - nr_sects = 0; - } - cond_resched(); - } - - *biop = bio; - return 0; -} - -/** - * blkdev_issue_write_same - queue a write same operation - * @bdev: target blockdev - * @sector: start sector - * @nr_sects: number of sectors to write - * @gfp_mask: memory allocation flags (for bio_alloc) - * @page: page containing data - * - * Description: - * Issue a write same request for the sectors in question. - */ -int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask, - struct page *page) -{ - struct bio *bio = NULL; - struct blk_plug plug; - int ret; - - blk_start_plug(&plug); - ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page, - &bio); - if (ret == 0 && bio) { - ret = submit_bio_wait(bio); - bio_put(bio); - } - blk_finish_plug(&plug); - return ret; -} -EXPORT_SYMBOL(blkdev_issue_write_same); - static int __blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags) diff --git a/block/blk-merge.c b/block/blk-merge.c index 4de34a332c9f..87cee7e82ae1 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -152,22 +152,6 @@ static struct bio *blk_bio_write_zeroes_split(struct request_queue *q, return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs); } -static struct bio *blk_bio_write_same_split(struct request_queue *q, - struct bio *bio, - struct bio_set *bs, - unsigned *nsegs) -{ - *nsegs = 1; - - if (!q->limits.max_write_same_sectors) - return NULL; - - if (bio_sectors(bio) <= q->limits.max_write_same_sectors) - return NULL; - - return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); -} - /* * Return the maximum number of sectors from the start of a bio that may be * submitted as a single request to a block device. 
If enough sectors remain, @@ -351,10 +335,6 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio, split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, nr_segs); break; - case REQ_OP_WRITE_SAME: - split = blk_bio_write_same_split(q, *bio, &q->bio_split, - nr_segs); - break; default: split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); break; @@ -416,8 +396,6 @@ unsigned int blk_recalc_rq_segments(struct request *rq) return 1; case REQ_OP_WRITE_ZEROES: return 0; - case REQ_OP_WRITE_SAME: - return 1; } rq_for_each_bvec(bv, rq, iter) @@ -555,8 +533,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq, if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg); - else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME) - nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg); else if (rq->bio) nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg); @@ -757,13 +733,6 @@ static enum elv_merge blk_try_req_merge(struct request *req, return ELEVATOR_NO_MERGE; } -static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) -{ - if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b)) - return true; - return false; -} - /* * For non-mq, this has to be called with the request spinlock acquired. * For mq with scheduling, the appropriate queue wide lock should be held. @@ -780,10 +749,6 @@ static struct request *attempt_merge(struct request_queue *q, if (rq_data_dir(req) != rq_data_dir(next)) return NULL; - if (req_op(req) == REQ_OP_WRITE_SAME && - !blk_write_same_mergeable(req->bio, next->bio)) - return NULL; - /* * Don't allow merge of different write hints, or for a hint with * non-hint IO. @@ -912,11 +877,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) if (!bio_crypt_rq_ctx_compatible(rq, bio)) return false; - /* must be using the same buffer */ - if (req_op(rq) == REQ_OP_WRITE_SAME && - !blk_write_same_mergeable(rq->bio, bio)) - return false; - /* * Don't allow merge of different write hints, or for a hint with * non-hint IO. 
diff --git a/block/blk-settings.c b/block/blk-settings.c index b880c70e22e4..b83df3d2eebc 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -42,7 +42,6 @@ void blk_set_default_limits(struct queue_limits *lim) lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; lim->max_dev_sectors = 0; lim->chunk_sectors = 0; - lim->max_write_same_sectors = 0; lim->max_write_zeroes_sectors = 0; lim->max_zone_append_sectors = 0; lim->max_discard_sectors = 0; @@ -79,7 +78,6 @@ void blk_set_stacking_limits(struct queue_limits *lim) lim->max_segment_size = UINT_MAX; lim->max_sectors = UINT_MAX; lim->max_dev_sectors = UINT_MAX; - lim->max_write_same_sectors = UINT_MAX; lim->max_write_zeroes_sectors = UINT_MAX; lim->max_zone_append_sectors = UINT_MAX; } @@ -178,18 +176,6 @@ void blk_queue_max_discard_sectors(struct request_queue *q, } EXPORT_SYMBOL(blk_queue_max_discard_sectors); -/** - * blk_queue_max_write_same_sectors - set max sectors for a single write same - * @q: the request queue for the device - * @max_write_same_sectors: maximum number of sectors to write per command - **/ -void blk_queue_max_write_same_sectors(struct request_queue *q, - unsigned int max_write_same_sectors) -{ - q->limits.max_write_same_sectors = max_write_same_sectors; -} -EXPORT_SYMBOL(blk_queue_max_write_same_sectors); - /** * blk_queue_max_write_zeroes_sectors - set max sectors for a single * write zeroes @@ -519,8 +505,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); - t->max_write_same_sectors = min(t->max_write_same_sectors, - b->max_write_same_sectors); t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors, b->max_write_zeroes_sectors); t->max_zone_append_sectors = min(t->max_zone_append_sectors, diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 9f32882ceb2f..5e81e65574a0 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -214,8 +214,7 @@ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *pag static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) { - return sprintf(page, "%llu\n", - (unsigned long long)q->limits.max_write_same_sectors << 9); + return queue_var_show(0, page); } static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 774ecc598bee..61fb6c52f4f3 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -65,7 +65,6 @@ bool blk_req_needs_zone_write_lock(struct request *rq) switch (req_op(rq)) { case REQ_OP_WRITE_ZEROES: - case REQ_OP_WRITE_SAME: case REQ_OP_WRITE: return blk_rq_zone_is_seq(rq); default: diff --git a/block/blk.h b/block/blk.h index 8bd43b3ad33d..c6f8c0ca569f 100644 --- a/block/blk.h +++ b/block/blk.h @@ -286,7 +286,6 @@ static inline bool blk_may_split(struct request_queue *q, struct bio *bio) case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: case REQ_OP_WRITE_ZEROES: - case REQ_OP_WRITE_SAME: return true; /* non-trivial splitting decisions */ default: break; diff --git a/block/bounce.c b/block/bounce.c index 7af1a72835b9..510360559276 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -181,9 +181,6 @@ static struct bio *bounce_clone_bio(struct bio *bio_src) case REQ_OP_SECURE_ERASE: case REQ_OP_WRITE_ZEROES: break; - case REQ_OP_WRITE_SAME: - bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; 
- break; default: bio_for_each_segment(bv, bio_src, iter) bio->bi_io_vec[bio->bi_vcnt++] = bv; diff --git a/include/linux/bio.h b/include/linux/bio.h index 117d7f248ac9..eb402afa370a 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -65,7 +65,6 @@ static inline bool bio_no_advance_iter(const struct bio *bio) { return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE || - bio_op(bio) == REQ_OP_WRITE_SAME || bio_op(bio) == REQ_OP_WRITE_ZEROES; } @@ -186,8 +185,6 @@ static inline unsigned bio_segments(struct bio *bio) case REQ_OP_SECURE_ERASE: case REQ_OP_WRITE_ZEROES: return 0; - case REQ_OP_WRITE_SAME: - return 1; default: break; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index fe065c394fff..077adf4b6e73 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -354,8 +354,6 @@ enum req_opf { REQ_OP_DISCARD = 3, /* securely erase sectors */ REQ_OP_SECURE_ERASE = 5, - /* write the same sector many times */ - REQ_OP_WRITE_SAME = 7, /* write the zero filled sector many times */ REQ_OP_WRITE_ZEROES = 9, /* Open a zone */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9c95df26fc26..b10470d5e986 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -97,7 +97,6 @@ struct queue_limits { unsigned int io_opt; unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; - unsigned int max_write_same_sectors; unsigned int max_write_zeroes_sectors; unsigned int max_zone_append_sectors; unsigned int discard_granularity; @@ -651,9 +650,6 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, return min(q->limits.max_discard_sectors, UINT_MAX >> SECTOR_SHIFT); - if (unlikely(op == REQ_OP_WRITE_SAME)) - return q->limits.max_write_same_sectors; - if (unlikely(op == REQ_OP_WRITE_ZEROES)) return q->limits.max_write_zeroes_sectors; @@ -696,8 +692,6 @@ extern void blk_queue_max_discard_segments(struct request_queue *, extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); -extern void blk_queue_max_write_same_sectors(struct request_queue *q, - unsigned int max_write_same_sectors); extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors); extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); @@ -842,9 +836,6 @@ static inline long nr_blockdev_pages(void) extern void blk_io_schedule(void); -extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask, struct page *page); - #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, @@ -1071,16 +1062,6 @@ static inline int bdev_discard_alignment(struct block_device *bdev) return q->limits.discard_alignment; } -static inline unsigned int bdev_write_same(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return q->limits.max_write_same_sectors; - - return 0; -} - static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index af68a67179b4..19514edc44f7 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1892,7 +1892,6 @@ void blk_fill_rwbs(char *rwbs, unsigned int op) switch (op & REQ_OP_MASK) { case 
REQ_OP_WRITE: - case REQ_OP_WRITE_SAME: rwbs[i++] = 'W'; break; case REQ_OP_DISCARD: -- cgit v1.2.3-71-gd317 From 763087dab97547230a6807c865a6a5ae53a59247 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 21 Feb 2022 19:21:12 -0800 Subject: net: add skb_set_end_offset() helper We have multiple places where this helper is convenient, and plan using it in the following patch. Signed-off-by: Eric Dumazet Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 10 ++++++++++ net/core/skbuff.c | 19 +++++-------------- 2 files changed, 15 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a3e90efe6586..115be7f73487 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1536,6 +1536,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb) { return skb->end; } + +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) +{ + skb->end = offset; +} #else static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { @@ -1546,6 +1551,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb) { return skb->end - skb->head; } + +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) +{ + skb->end = skb->head + offset; +} #endif /* Internal */ diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9d0388bed0c1..27a2296241c9 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -201,7 +201,7 @@ static void __build_skb_around(struct sk_buff *skb, void *data, skb->head = data; skb->data = data; skb_reset_tail_pointer(skb); - skb->end = skb->tail + size; + skb_set_end_offset(skb, size); skb->mac_header = (typeof(skb->mac_header))~0U; skb->transport_header = (typeof(skb->transport_header))~0U; @@ -1736,11 +1736,10 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->head = data; skb->head_frag = 0; skb->data += off; + + skb_set_end_offset(skb, size); #ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->end = size; off = nhead; -#else - skb->end = skb->head + size; #endif skb->tail += off; skb_headers_offset_update(skb, nhead); @@ -6044,11 +6043,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, skb->head = data; skb->data = data; skb->head_frag = 0; -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->end = size; -#else - skb->end = skb->head + size; -#endif + skb_set_end_offset(skb, size); skb_set_tail_pointer(skb, skb_headlen(skb)); skb_headers_offset_update(skb, 0); skb->cloned = 0; @@ -6186,11 +6181,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, skb->head = data; skb->head_frag = 0; skb->data = data; -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->end = size; -#else - skb->end = skb->head + size; -#endif + skb_set_end_offset(skb, size); skb_reset_tail_pointer(skb); skb_headers_offset_update(skb, 0); skb->cloned = 0; -- cgit v1.2.3-71-gd317 From 2b88cba55883eaafbc9b7cbff0b2c7cdba71ed01 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 21 Feb 2022 19:21:13 -0800 Subject: net: preserve skb_end_offset() in skb_unclone_keeptruesize() syzbot found another way to trigger the infamous WARN_ON_ONCE(delta < len) in skb_try_coalesce() [1] I was able to root cause the issue to kfence. 
When kfence is in action, the following assertion is no longer true: int size = xxxx; void *ptr1 = kmalloc(size, gfp); void *ptr2 = kmalloc(size, gfp); if (ptr1 && ptr2) ASSERT(ksize(ptr1) == ksize(ptr2)); We attempted to fix these issues in the blamed commits, but forgot that TCP was possibly shifting data after skb_unclone_keeptruesize() has been used, notably from tcp_retrans_try_collapse(). So we not only need to keep the same skb->truesize value, we also need to make sure TCP won't fill new tailroom that pskb_expand_head() was able to get from an addr = kmalloc(...) followed by ksize(addr). Split skb_unclone_keeptruesize() into two parts: 1) Inline skb_unclone_keeptruesize() for the common case, when skb is not cloned. 2) Out of line __skb_unclone_keeptruesize() for the 'slow path'. WARNING: CPU: 1 PID: 6490 at net/core/skbuff.c:5295 skb_try_coalesce+0x1235/0x1560 net/core/skbuff.c:5295 Modules linked in: CPU: 1 PID: 6490 Comm: syz-executor161 Not tainted 5.17.0-rc4-syzkaller-00229-g4f12b742eb2b #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 RIP: 0010:skb_try_coalesce+0x1235/0x1560 net/core/skbuff.c:5295 Code: bf 01 00 00 00 0f b7 c0 89 c6 89 44 24 20 e8 62 24 4e fa 8b 44 24 20 83 e8 01 0f 85 e5 f0 ff ff e9 87 f4 ff ff e8 cb 20 4e fa <0f> 0b e9 06 f9 ff ff e8 af b2 95 fa e9 69 f0 ff ff e8 95 b2 95 fa RSP: 0018:ffffc900063af268 EFLAGS: 00010293 RAX: 0000000000000000 RBX: 00000000ffffffd5 RCX: 0000000000000000 RDX: ffff88806fc05700 RSI: ffffffff872abd55 RDI: 0000000000000003 RBP: ffff88806e675500 R08: 00000000ffffffd5 R09: 0000000000000000 R10: ffffffff872ab659 R11: 0000000000000000 R12: ffff88806dd554e8 R13: ffff88806dd9bac0 R14: ffff88806dd9a2c0 R15: 0000000000000155 FS: 00007f18014f9700(0000) GS:ffff8880b9c00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020002000 CR3: 000000006be7a000 CR4: 00000000003506f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: tcp_try_coalesce net/ipv4/tcp_input.c:4651 [inline] tcp_try_coalesce+0x393/0x920 net/ipv4/tcp_input.c:4630 tcp_queue_rcv+0x8a/0x6e0 net/ipv4/tcp_input.c:4914 tcp_data_queue+0x11fd/0x4bb0 net/ipv4/tcp_input.c:5025 tcp_rcv_established+0x81e/0x1ff0 net/ipv4/tcp_input.c:5947 tcp_v4_do_rcv+0x65e/0x980 net/ipv4/tcp_ipv4.c:1719 sk_backlog_rcv include/net/sock.h:1037 [inline] __release_sock+0x134/0x3b0 net/core/sock.c:2779 release_sock+0x54/0x1b0 net/core/sock.c:3311 sk_wait_data+0x177/0x450 net/core/sock.c:2821 tcp_recvmsg_locked+0xe28/0x1fd0 net/ipv4/tcp.c:2457 tcp_recvmsg+0x137/0x610 net/ipv4/tcp.c:2572 inet_recvmsg+0x11b/0x5e0 net/ipv4/af_inet.c:850 sock_recvmsg_nosec net/socket.c:948 [inline] sock_recvmsg net/socket.c:966 [inline] sock_recvmsg net/socket.c:962 [inline] ____sys_recvmsg+0x2c4/0x600 net/socket.c:2632 ___sys_recvmsg+0x127/0x200 net/socket.c:2674 __sys_recvmsg+0xe2/0x1a0 net/socket.c:2704 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Fixes: c4777efa751d ("net: add and use skb_unclone_keeptruesize() helper") Fixes: 097b9146c0e2 ("net: fix up truesize of cloned skb in skb_prepare_for_shift()") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Marco Elver Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 18 +++++++++--------- net/core/skbuff.c | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 9 deletions(-) (limited to
'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 115be7f73487..31be38078918 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1795,19 +1795,19 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) return 0; } -/* This variant of skb_unclone() makes sure skb->truesize is not changed */ +/* This variant of skb_unclone() makes sure skb->truesize + * and skb_end_offset() are not changed, whenever a new skb->head is needed. + * + * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X)) + * when various debugging features are in place. + */ +int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri); static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) { might_sleep_if(gfpflags_allow_blocking(pri)); - if (skb_cloned(skb)) { - unsigned int save = skb->truesize; - int res; - - res = pskb_expand_head(skb, 0, 0, pri); - skb->truesize = save; - return res; - } + if (skb_cloned(skb)) + return __skb_unclone_keeptruesize(skb, pri); return 0; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 27a2296241c9..725f2b356769 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1787,6 +1787,38 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) } EXPORT_SYMBOL(skb_realloc_headroom); +int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) +{ + unsigned int saved_end_offset, saved_truesize; + struct skb_shared_info *shinfo; + int res; + + saved_end_offset = skb_end_offset(skb); + saved_truesize = skb->truesize; + + res = pskb_expand_head(skb, 0, 0, pri); + if (res) + return res; + + skb->truesize = saved_truesize; + + if (likely(skb_end_offset(skb) == saved_end_offset)) + return 0; + + shinfo = skb_shinfo(skb); + + /* We are about to change back skb->end, + * we need to move skb_shinfo() to its new location. + */ + memmove(skb->head + saved_end_offset, + shinfo, + offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); + + skb_set_end_offset(skb, saved_end_offset); + + return 0; +} + /** * skb_expand_head - reallocate header of &sk_buff * @skb: buffer to reallocate -- cgit v1.2.3-71-gd317 From d0b9d6dcaa5ac480c272683919f387cc6d82b638 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 23 Sep 2021 20:42:44 +0200 Subject: sched/headers: Fix header to build standalone: <linux/sched_clock.h> Uses various kernel types, so it does not build standalone. Signed-off-by: Ingo Molnar Reviewed-by: Peter Zijlstra --- include/linux/sched_clock.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index 835ee87ed792..cb41c5edb4d4 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -5,6 +5,8 @@ #ifndef LINUX_SCHED_CLOCK #define LINUX_SCHED_CLOCK +#include <linux/types.h> + #ifdef CONFIG_GENERIC_SCHED_CLOCK /** * struct clock_read_data - data required to read from sched_clock() -- cgit v1.2.3-71-gd317 From 669f45f19cf7feea7066dce86a0ce89dfc8a2065 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 22 Feb 2022 13:23:59 +0100 Subject: sched/headers: Add initial new headers as identity mappings This allows code sharing between the fast-headers tree and the vanilla scheduler tree.
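To make "identity mapping" concrete: each new header is a single include of its existing counterpart, so code can adopt the new names before any real header split happens. The one-liner below is a sketch inferred from the file names in the diff that follows (the angle-bracketed include targets did not survive this archive), not a quote of the patch:

/* include/linux/kref_api.h -- entire contents of the new header */
#include <linux/kref.h>

/* call sites can then migrate incrementally */
#include <linux/kref_api.h>	/* currently identical to <linux/kref.h> */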
Signed-off-by: Ingo Molnar Reviewed-by: Peter Zijlstra --- arch/arm64/include/asm/paravirt_api_clock.h | 1 + arch/x86/include/asm/paravirt_api_clock.h | 1 + include/linux/cgroup_api.h | 1 + include/linux/cpumask_api.h | 1 + include/linux/fs_api.h | 1 + include/linux/gfp_api.h | 1 + include/linux/hashtable_api.h | 1 + include/linux/hrtimer_api.h | 1 + include/linux/kobject_api.h | 1 + include/linux/kref_api.h | 1 + include/linux/ktime_api.h | 1 + include/linux/llist_api.h | 1 + include/linux/lockdep_api.h | 1 + include/linux/mm_api.h | 1 + include/linux/mutex_api.h | 1 + include/linux/perf_event_api.h | 1 + include/linux/pgtable_api.h | 1 + include/linux/ptrace_api.h | 1 + include/linux/rcuwait_api.h | 1 + include/linux/refcount_api.h | 1 + include/linux/sched/affinity.h | 1 + include/linux/sched/cond_resched.h | 1 + include/linux/sched/posix-timers.h | 1 + include/linux/sched/rseq_api.h | 1 + include/linux/sched/task_flags.h | 1 + include/linux/sched/thread_info_api.h | 1 + include/linux/seqlock_api.h | 1 + include/linux/softirq.h | 1 + include/linux/spinlock_api.h | 1 + include/linux/swait_api.h | 1 + include/linux/syscalls_api.h | 1 + include/linux/u64_stats_sync_api.h | 1 + include/linux/wait_api.h | 1 + include/linux/workqueue_api.h | 1 + 34 files changed, 34 insertions(+) create mode 100644 arch/arm64/include/asm/paravirt_api_clock.h create mode 100644 arch/x86/include/asm/paravirt_api_clock.h create mode 100644 include/linux/cgroup_api.h create mode 100644 include/linux/cpumask_api.h create mode 100644 include/linux/fs_api.h create mode 100644 include/linux/gfp_api.h create mode 100644 include/linux/hashtable_api.h create mode 100644 include/linux/hrtimer_api.h create mode 100644 include/linux/kobject_api.h create mode 100644 include/linux/kref_api.h create mode 100644 include/linux/ktime_api.h create mode 100644 include/linux/llist_api.h create mode 100644 include/linux/lockdep_api.h create mode 100644 include/linux/mm_api.h create mode 100644 include/linux/mutex_api.h create mode 100644 include/linux/perf_event_api.h create mode 100644 include/linux/pgtable_api.h create mode 100644 include/linux/ptrace_api.h create mode 100644 include/linux/rcuwait_api.h create mode 100644 include/linux/refcount_api.h create mode 100644 include/linux/sched/affinity.h create mode 100644 include/linux/sched/cond_resched.h create mode 100644 include/linux/sched/posix-timers.h create mode 100644 include/linux/sched/rseq_api.h create mode 100644 include/linux/sched/task_flags.h create mode 100644 include/linux/sched/thread_info_api.h create mode 100644 include/linux/seqlock_api.h create mode 100644 include/linux/softirq.h create mode 100644 include/linux/spinlock_api.h create mode 100644 include/linux/swait_api.h create mode 100644 include/linux/syscalls_api.h create mode 100644 include/linux/u64_stats_sync_api.h create mode 100644 include/linux/wait_api.h create mode 100644 include/linux/workqueue_api.h (limited to 'include/linux') diff --git a/arch/arm64/include/asm/paravirt_api_clock.h b/arch/arm64/include/asm/paravirt_api_clock.h new file mode 100644 index 000000000000..65ac7cee0dad --- /dev/null +++ b/arch/arm64/include/asm/paravirt_api_clock.h @@ -0,0 +1 @@ +#include diff --git a/arch/x86/include/asm/paravirt_api_clock.h b/arch/x86/include/asm/paravirt_api_clock.h new file mode 100644 index 000000000000..65ac7cee0dad --- /dev/null +++ b/arch/x86/include/asm/paravirt_api_clock.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/cgroup_api.h b/include/linux/cgroup_api.h new file mode 100644 index 
000000000000..d0cfe8025111 --- /dev/null +++ b/include/linux/cgroup_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/cpumask_api.h b/include/linux/cpumask_api.h new file mode 100644 index 000000000000..83bd3ebe82b0 --- /dev/null +++ b/include/linux/cpumask_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/fs_api.h b/include/linux/fs_api.h new file mode 100644 index 000000000000..83be38d6d413 --- /dev/null +++ b/include/linux/fs_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/gfp_api.h b/include/linux/gfp_api.h new file mode 100644 index 000000000000..5a05a2764a86 --- /dev/null +++ b/include/linux/gfp_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/hashtable_api.h b/include/linux/hashtable_api.h new file mode 100644 index 000000000000..c268ac2c5c0e --- /dev/null +++ b/include/linux/hashtable_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/hrtimer_api.h b/include/linux/hrtimer_api.h new file mode 100644 index 000000000000..8d9700894468 --- /dev/null +++ b/include/linux/hrtimer_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/kobject_api.h b/include/linux/kobject_api.h new file mode 100644 index 000000000000..6e36a054c2d6 --- /dev/null +++ b/include/linux/kobject_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/kref_api.h b/include/linux/kref_api.h new file mode 100644 index 000000000000..d67e554721d2 --- /dev/null +++ b/include/linux/kref_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/ktime_api.h b/include/linux/ktime_api.h new file mode 100644 index 000000000000..f697d493960f --- /dev/null +++ b/include/linux/ktime_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/llist_api.h b/include/linux/llist_api.h new file mode 100644 index 000000000000..625bec0393a1 --- /dev/null +++ b/include/linux/llist_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/lockdep_api.h b/include/linux/lockdep_api.h new file mode 100644 index 000000000000..907e66979ab2 --- /dev/null +++ b/include/linux/lockdep_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/mm_api.h b/include/linux/mm_api.h new file mode 100644 index 000000000000..a5ace2b198b8 --- /dev/null +++ b/include/linux/mm_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/mutex_api.h b/include/linux/mutex_api.h new file mode 100644 index 000000000000..85ab9491e13e --- /dev/null +++ b/include/linux/mutex_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/perf_event_api.h b/include/linux/perf_event_api.h new file mode 100644 index 000000000000..c2fd6048b790 --- /dev/null +++ b/include/linux/perf_event_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/pgtable_api.h b/include/linux/pgtable_api.h new file mode 100644 index 000000000000..ff367a4ba8c4 --- /dev/null +++ b/include/linux/pgtable_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/ptrace_api.h b/include/linux/ptrace_api.h new file mode 100644 index 000000000000..26e7d275ad8d --- /dev/null +++ b/include/linux/ptrace_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/rcuwait_api.h b/include/linux/rcuwait_api.h new file mode 100644 index 000000000000..f962e28544dd --- /dev/null +++ b/include/linux/rcuwait_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/refcount_api.h b/include/linux/refcount_api.h new file mode 100644 index 000000000000..5f032589f568 --- /dev/null +++ b/include/linux/refcount_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/sched/affinity.h b/include/linux/sched/affinity.h new file mode 100644 index 000000000000..227f5be81bcd --- /dev/null +++ 
b/include/linux/sched/affinity.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/sched/cond_resched.h b/include/linux/sched/cond_resched.h new file mode 100644 index 000000000000..227f5be81bcd --- /dev/null +++ b/include/linux/sched/cond_resched.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/sched/posix-timers.h b/include/linux/sched/posix-timers.h new file mode 100644 index 000000000000..523a381d6c88 --- /dev/null +++ b/include/linux/sched/posix-timers.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/sched/rseq_api.h b/include/linux/sched/rseq_api.h new file mode 100644 index 000000000000..cf2af72693e1 --- /dev/null +++ b/include/linux/sched/rseq_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/sched/task_flags.h b/include/linux/sched/task_flags.h new file mode 100644 index 000000000000..227f5be81bcd --- /dev/null +++ b/include/linux/sched/task_flags.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/sched/thread_info_api.h b/include/linux/sched/thread_info_api.h new file mode 100644 index 000000000000..2c60fbc16c08 --- /dev/null +++ b/include/linux/sched/thread_info_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/seqlock_api.h b/include/linux/seqlock_api.h new file mode 100644 index 000000000000..be91e7d3b826 --- /dev/null +++ b/include/linux/seqlock_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/softirq.h b/include/linux/softirq.h new file mode 100644 index 000000000000..c73d7dcb4cb5 --- /dev/null +++ b/include/linux/softirq.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/spinlock_api.h b/include/linux/spinlock_api.h new file mode 100644 index 000000000000..6338b27f98df --- /dev/null +++ b/include/linux/spinlock_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/swait_api.h b/include/linux/swait_api.h new file mode 100644 index 000000000000..1eeaaaaa5ea7 --- /dev/null +++ b/include/linux/swait_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/syscalls_api.h b/include/linux/syscalls_api.h new file mode 100644 index 000000000000..23e012b04db4 --- /dev/null +++ b/include/linux/syscalls_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/u64_stats_sync_api.h b/include/linux/u64_stats_sync_api.h new file mode 100644 index 000000000000..c72ca63da44b --- /dev/null +++ b/include/linux/u64_stats_sync_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/wait_api.h b/include/linux/wait_api.h new file mode 100644 index 000000000000..4e930548935a --- /dev/null +++ b/include/linux/wait_api.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/workqueue_api.h b/include/linux/workqueue_api.h new file mode 100644 index 000000000000..77debb5d2760 --- /dev/null +++ b/include/linux/workqueue_api.h @@ -0,0 +1 @@ +#include -- cgit v1.2.3-71-gd317 From fbed5664b73830dcb1a19af4f7f1f1b424f54609 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 22 Feb 2022 13:28:20 +0100 Subject: sched/headers: Make the header build standalone This header depends on various scheduler definitions. Signed-off-by: Ingo Molnar Reviewed-by: Peter Zijlstra --- include/linux/sched/deadline.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 1aff00b65f3c..7c83d4d5a971 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -6,6 +6,8 @@ * NORMAL/BATCH tasks. 
*/ +#include <linux/sched.h> + #define MAX_DL_PRIO 0 static inline int dl_prio(int prio) -- cgit v1.2.3-71-gd317 From b26ef81c46ed15d11ddddba9ba1cd52c749385ad Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 22 Feb 2022 14:04:50 -0800 Subject: drop_monitor: remove quadratic behavior drop_monitor is using a single global list on which all netdevices in the host have an element, regardless of their netns. This scales poorly, not only at device unregister time (what I caught during my netns dismantle stress tests), but also at packet processing time whenever trace_napi_poll_hit() is called. If the intent was to avoid adding one pointer in 'struct net_device', then surely we prefer O(1) behavior. Signed-off-by: Eric Dumazet Cc: Neil Horman Signed-off-by: David S. Miller --- include/linux/netdevice.h | 4 ++- net/core/drop_monitor.c | 79 ++++++++++++++--------------------------------- 2 files changed, 27 insertions(+), 56 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 93fc680b658f..c79ee2296296 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2236,7 +2236,9 @@ struct net_device { #if IS_ENABLED(CONFIG_MRP) struct mrp_port __rcu *mrp_port; #endif - +#if IS_ENABLED(CONFIG_NET_DROP_MONITOR) + struct dm_hw_stat_delta __rcu *dm_private; +#endif struct device dev; const struct attribute_group *sysfs_groups[4]; const struct attribute_group *sysfs_rx_queue_group; diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index d7d177f75d43..b89e3e95bffc 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -64,7 +64,6 @@ static const char * const drop_reasons[] = { /* net_dm_mutex * * An overall lock guarding every operation coming from userspace. - * It also guards the global 'hw_stats_list' list.
*/ static DEFINE_MUTEX(net_dm_mutex); @@ -100,11 +99,9 @@ struct per_cpu_dm_data { }; struct dm_hw_stat_delta { - struct net_device *dev; unsigned long last_rx; - struct list_head list; - struct rcu_head rcu; unsigned long last_drop_val; + struct rcu_head rcu; }; static struct genl_family net_drop_monitor_family; @@ -115,7 +112,6 @@ static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_hw_cpu_data); static int dm_hit_limit = 64; static int dm_delay = 1; static unsigned long dm_hw_check_delta = 2*HZ; -static LIST_HEAD(hw_stats_list); static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY; static u32 net_dm_trunc_len; @@ -287,33 +283,27 @@ static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi, int work, int budget) { - struct dm_hw_stat_delta *new_stat; - + struct net_device *dev = napi->dev; + struct dm_hw_stat_delta *stat; /* * Don't check napi structures with no associated device */ - if (!napi->dev) + if (!dev) return; rcu_read_lock(); - list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { - struct net_device *dev; - + stat = rcu_dereference(dev->dm_private); + if (stat) { /* * only add a note to our monitor buffer if: - * 1) this is the dev we received on - * 2) its after the last_rx delta - * 3) our rx_dropped count has gone up + * 1) its after the last_rx delta + * 2) our rx_dropped count has gone up */ - /* Paired with WRITE_ONCE() in dropmon_net_event() */ - dev = READ_ONCE(new_stat->dev); - if ((dev == napi->dev) && - (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) && - (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) { + if (time_after(jiffies, stat->last_rx + dm_hw_check_delta) && + (dev->stats.rx_dropped != stat->last_drop_val)) { trace_drop_common(NULL, NULL); - new_stat->last_drop_val = napi->dev->stats.rx_dropped; - new_stat->last_rx = jiffies; - break; + stat->last_drop_val = dev->stats.rx_dropped; + stat->last_rx = jiffies; } } rcu_read_unlock(); @@ -1198,7 +1188,6 @@ err_module_put: static void net_dm_trace_off_set(void) { - struct dm_hw_stat_delta *new_stat, *temp; const struct net_dm_alert_ops *ops; int cpu; @@ -1222,13 +1211,6 @@ static void net_dm_trace_off_set(void) consume_skb(skb); } - list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) { - if (new_stat->dev == NULL) { - list_del_rcu(&new_stat->list); - kfree_rcu(new_stat, rcu); - } - } - module_put(THIS_MODULE); } @@ -1589,41 +1571,28 @@ static int dropmon_net_event(struct notifier_block *ev_block, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct dm_hw_stat_delta *new_stat = NULL; - struct dm_hw_stat_delta *tmp; + struct dm_hw_stat_delta *stat; switch (event) { case NETDEV_REGISTER: - new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL); + if (WARN_ON_ONCE(rtnl_dereference(dev->dm_private))) + break; + stat = kzalloc(sizeof(*stat), GFP_KERNEL); + if (!stat) + break; - if (!new_stat) - goto out; + stat->last_rx = jiffies; + rcu_assign_pointer(dev->dm_private, stat); - new_stat->dev = dev; - new_stat->last_rx = jiffies; - mutex_lock(&net_dm_mutex); - list_add_rcu(&new_stat->list, &hw_stats_list); - mutex_unlock(&net_dm_mutex); break; case NETDEV_UNREGISTER: - mutex_lock(&net_dm_mutex); - list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { - if (new_stat->dev == dev) { - - /* Paired with READ_ONCE() in trace_napi_poll_hit() */ - WRITE_ONCE(new_stat->dev, NULL); - - if (trace_state == TRACE_OFF) { - 
list_del_rcu(&new_stat->list); - kfree_rcu(new_stat, rcu); - break; - } - } + stat = rtnl_dereference(dev->dm_private); + if (stat) { + rcu_assign_pointer(dev->dm_private, NULL); + kfree_rcu(stat, rcu); } - mutex_unlock(&net_dm_mutex); break; } -out: return NOTIFY_DONE; } -- cgit v1.2.3-71-gd317 From a21d9a670d81103db7f788de1a4a4a6e4b891a0b Mon Sep 17 00:00:00 2001 From: Hans Schultz Date: Wed, 23 Feb 2022 11:16:46 +0100 Subject: net: bridge: Add support for bridge port in locked mode In a 802.1X scenario, clients connected to a bridge port shall not be allowed to have traffic forwarded until fully authenticated. A static fdb entry of the clients MAC address for the bridge port unlocks the client and allows bidirectional communication. This scenario is facilitated with setting the bridge port in locked mode, which is also supported by various switchcore chipsets. Signed-off-by: Hans Schultz Acked-by: Nikolay Aleksandrov Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/if_bridge.h | 1 + include/uapi/linux/if_link.h | 1 + net/bridge/br_input.c | 11 ++++++++++- net/bridge/br_netlink.c | 6 +++++- 4 files changed, 17 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 509e18c7e740..3aae023a9353 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -58,6 +58,7 @@ struct br_ip_list { #define BR_MRP_LOST_CONT BIT(18) #define BR_MRP_LOST_IN_CONT BIT(19) #define BR_TX_FWD_OFFLOAD BIT(20) +#define BR_PORT_LOCKED BIT(21) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index e1ba2d51b717..be09d2ad4b5d 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -537,6 +537,7 @@ enum { IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, + IFLA_BRPORT_LOCKED, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index b50382f957c1..e0c13fcc50ed 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -81,6 +81,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb if (!p || p->state == BR_STATE_DISABLED) goto drop; + br = p->br; brmctx = &p->br->multicast_ctx; pmctx = &p->multicast_ctx; state = p->state; @@ -88,10 +89,18 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb &state, &vlan)) goto out; + if (p->flags & BR_PORT_LOCKED) { + struct net_bridge_fdb_entry *fdb_src = + br_fdb_find_rcu(br, eth_hdr(skb)->h_source, vid); + + if (!fdb_src || READ_ONCE(fdb_src->dst) != p || + test_bit(BR_FDB_LOCAL, &fdb_src->flags)) + goto drop; + } + nbp_switchdev_frame_mark(p, skb); /* insert into forwarding database after filtering to avoid spoofing */ - br = p->br; if (p->flags & BR_LEARNING) br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 2ff83d84230d..7d4432ca9a20 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -184,6 +184,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */ + nla_total_size(1) /* IFLA_BRPORT_NEIGH_SUPPRESS */ + nla_total_size(1) /* IFLA_BRPORT_ISOLATED */ + + nla_total_size(1) /* IFLA_BRPORT_LOCKED */ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */ + 
nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */ @@ -269,7 +270,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, BR_MRP_LOST_CONT)) || nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN, !!(p->flags & BR_MRP_LOST_IN_CONT)) || - nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED))) + nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) || + nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED))) return -EMSGSIZE; timerval = br_timer_value(&p->message_age_timer); @@ -827,6 +829,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 }, [IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 }, [IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 }, + [IFLA_BRPORT_LOCKED] = { .type = NLA_U8 }, [IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 }, [IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 }, }; @@ -893,6 +896,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[], br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL); br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS); br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED); + br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED); changed_mask = old_flags ^ p->flags; -- cgit v1.2.3-71-gd317 From c2700d2886a87f83f31e0a301de1d2350b52c79b Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Sat, 22 Jan 2022 22:27:44 +0530 Subject: nvme-tcp: send H2CData PDUs based on MAXH2CDATA As per NVMe/TCP specification (revision 1.0a, section 3.6.2.3) Maximum Host to Controller Data length (MAXH2CDATA): Specifies the maximum number of PDU-Data bytes per H2CData PDU in bytes. This value is a multiple of dwords and should be no less than 4,096. Current code sets H2CData PDU data_length to r2t_length, it does not check MAXH2CDATA value. Fix this by setting H2CData PDU data_length to min(req->h2cdata_left, queue->maxh2cdata). Also validate MAXH2CDATA value returned by target in ICResp PDU, if it is not a multiple of dword or if it is less than 4096 return -EINVAL from nvme_tcp_init_connection(). 
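The splitting rule is plain bounded arithmetic. Here is a small self-contained C sketch (made-up sizes, not driver code) of how one R2T of r2t_length bytes is answered with several H2CData PDUs of at most MAXH2CDATA bytes each, with the DATA_LAST flag only on the final PDU:

#include <stdio.h>

#define NVME_TCP_MIN_MAXH2CDATA 4096

int main(void)
{
	unsigned int maxh2cdata = 65536;  /* advertised by target in ICResp */
	unsigned int r2t_length = 200000; /* requested by target in R2T */
	unsigned int offset = 0;
	unsigned int left = r2t_length;

	/* ICResp validation: a multiple of dwords, no less than 4096 */
	if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
		fprintf(stderr, "invalid MAXH2CDATA %u\n", maxh2cdata);
		return 1;
	}

	while (left) {
		unsigned int pdu_len = left < maxh2cdata ? left : maxh2cdata;

		left -= pdu_len;
		printf("H2CData offset=%u length=%u%s\n", offset, pdu_len,
		       left ? "" : " [DATA_LAST]");
		offset += pdu_len;
	}
	return 0;
}

This mirrors req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata) in the patch: 200000 bytes against a 64 KiB limit become three full PDUs plus a 3392-byte tail.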
Signed-off-by: Varun Prakash Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- drivers/nvme/host/tcp.c | 63 +++++++++++++++++++++++++++++++++++++----------- include/linux/nvme-tcp.h | 1 + 2 files changed, 50 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 891a36d02e7c..65e00c64a588 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -44,6 +44,8 @@ struct nvme_tcp_request { u32 data_len; u32 pdu_len; u32 pdu_sent; + u32 h2cdata_left; + u32 h2cdata_offset; u16 ttag; __le16 status; struct list_head entry; @@ -95,6 +97,7 @@ struct nvme_tcp_queue { struct nvme_tcp_request *request; int queue_size; + u32 maxh2cdata; size_t cmnd_capsule_len; struct nvme_tcp_ctrl *ctrl; unsigned long flags; @@ -572,23 +575,26 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, return ret; } -static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, - struct nvme_tcp_r2t_pdu *pdu) +static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_data_pdu *data = req->pdu; struct nvme_tcp_queue *queue = req->queue; struct request *rq = blk_mq_rq_from_pdu(req); + u32 h2cdata_sent = req->pdu_len; u8 hdgst = nvme_tcp_hdgst_len(queue); u8 ddgst = nvme_tcp_ddgst_len(queue); req->state = NVME_TCP_SEND_H2C_PDU; req->offset = 0; - req->pdu_len = le32_to_cpu(pdu->r2t_length); + req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); req->pdu_sent = 0; + req->h2cdata_left -= req->pdu_len; + req->h2cdata_offset += h2cdata_sent; memset(data, 0, sizeof(*data)); data->hdr.type = nvme_tcp_h2c_data; - data->hdr.flags = NVME_TCP_F_DATA_LAST; + if (!req->h2cdata_left) + data->hdr.flags = NVME_TCP_F_DATA_LAST; if (queue->hdr_digest) data->hdr.flags |= NVME_TCP_F_HDGST; if (queue->data_digest) @@ -597,9 +603,9 @@ static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, data->hdr.pdo = data->hdr.hlen + hdgst; data->hdr.plen = cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); - data->ttag = pdu->ttag; + data->ttag = req->ttag; data->command_id = nvme_cid(rq); - data->data_offset = pdu->r2t_offset; + data->data_offset = cpu_to_le32(req->h2cdata_offset); data->data_length = cpu_to_le32(req->pdu_len); } @@ -609,6 +615,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, struct nvme_tcp_request *req; struct request *rq; u32 r2t_length = le32_to_cpu(pdu->r2t_length); + u32 r2t_offset = le32_to_cpu(pdu->r2t_offset); rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); if (!rq) { @@ -633,14 +640,19 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, return -EPROTO; } - if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { + if (unlikely(r2t_offset < req->data_sent)) { dev_err(queue->ctrl->ctrl.device, "req %d unexpected r2t offset %u (expected %zu)\n", - rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent); + rq->tag, r2t_offset, req->data_sent); return -EPROTO; } - nvme_tcp_setup_h2c_data_pdu(req, pdu); + req->pdu_len = 0; + req->h2cdata_left = r2t_length; + req->h2cdata_offset = r2t_offset; + req->ttag = pdu->ttag; + + nvme_tcp_setup_h2c_data_pdu(req); nvme_tcp_queue_request(req, false, true); return 0; @@ -928,6 +940,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; int req_data_len = req->data_len; + u32 h2cdata_left = req->h2cdata_left; while (true) { struct page *page = nvme_tcp_req_cur_page(req); @@ -972,7 +985,10 @@ static int 
nvme_tcp_try_send_data(struct nvme_tcp_request *req) req->state = NVME_TCP_SEND_DDGST; req->offset = 0; } else { - nvme_tcp_done_send_req(queue); + if (h2cdata_left) + nvme_tcp_setup_h2c_data_pdu(req); + else + nvme_tcp_done_send_req(queue); } return 1; } @@ -1030,9 +1046,14 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) if (queue->hdr_digest && !req->offset) nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); - ret = kernel_sendpage(queue->sock, virt_to_page(pdu), - offset_in_page(pdu) + req->offset, len, - MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); + if (!req->h2cdata_left) + ret = kernel_sendpage(queue->sock, virt_to_page(pdu), + offset_in_page(pdu) + req->offset, len, + MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); + else + ret = sock_no_sendpage(queue->sock, virt_to_page(pdu), + offset_in_page(pdu) + req->offset, len, + MSG_DONTWAIT | MSG_MORE); if (unlikely(ret <= 0)) return ret; @@ -1052,6 +1073,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; size_t offset = req->offset; + u32 h2cdata_left = req->h2cdata_left; int ret; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { @@ -1069,7 +1091,10 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) return ret; if (offset + ret == NVME_TCP_DIGEST_LENGTH) { - nvme_tcp_done_send_req(queue); + if (h2cdata_left) + nvme_tcp_setup_h2c_data_pdu(req); + else + nvme_tcp_done_send_req(queue); return 1; } @@ -1261,6 +1286,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) struct msghdr msg = {}; struct kvec iov; bool ctrl_hdgst, ctrl_ddgst; + u32 maxh2cdata; int ret; icreq = kzalloc(sizeof(*icreq), GFP_KERNEL); @@ -1344,6 +1370,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) goto free_icresp; } + maxh2cdata = le32_to_cpu(icresp->maxdata); + if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) { + pr_err("queue %d: invalid maxh2cdata returned %u\n", + nvme_tcp_queue_id(queue), maxh2cdata); + goto free_icresp; + } + queue->maxh2cdata = maxh2cdata; + ret = 0; free_icresp: kfree(icresp); @@ -2329,6 +2363,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, req->data_sent = 0; req->pdu_len = 0; req->pdu_sent = 0; + req->h2cdata_left = 0; req->data_len = blk_rq_nr_phys_segments(rq) ? blk_rq_payload_bytes(rq) : 0; req->curr_bio = rq->bio; diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h index 959e0bd9a913..75470159a194 100644 --- a/include/linux/nvme-tcp.h +++ b/include/linux/nvme-tcp.h @@ -12,6 +12,7 @@ #define NVME_TCP_DISC_PORT 8009 #define NVME_TCP_ADMIN_CCSZ SZ_8K #define NVME_TCP_DIGEST_LENGTH 4 +#define NVME_TCP_MIN_MAXH2CDATA 4096 enum nvme_tcp_pfv { NVME_TCP_PFV_1_0 = 0x0, -- cgit v1.2.3-71-gd317 From f2eb478f2f322217aa642e11c1cc011f99c797e6 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 22 Feb 2022 08:07:13 +0100 Subject: kernfs: move struct kernfs_root out of the public view. There is no need to have struct kernfs_root be part of kernfs.h for the whole kernel to see and poke around it. Move it internal to kernfs code and provide a helper function, kernfs_root_to_node(), to handle the one field that kernfs users were directly accessing from the structure. 
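This is the classic opaque-handle refactor. A minimal userspace model of the idea (simplified types, not the actual kernfs code):

#include <stdio.h>

struct kernfs_node { const char *name; };

/* public side: only a forward declaration and an accessor... */
struct kernfs_root;
struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root);

/* ...private side: the full definition, visible to kernfs alone */
struct kernfs_root {
	struct kernfs_node *kn;
	unsigned int flags;
};

struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root)
{
	return root->kn;
}

int main(void)
{
	struct kernfs_node kn = { .name = "sysfs root node" };
	struct kernfs_root root = { .kn = &kn };

	/* callers write this instead of root->kn */
	printf("%s\n", kernfs_root_to_node(&root)->name);
	return 0;
}

With the definition hidden, outside users can no longer poke at the idr, the supers list or the rwsem, which is the point of the move.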
Cc: Imran Khan Acked-by: Tejun Heo Link: https://lore.kernel.org/r/20220222070713.3517679-1-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 4 ++-- fs/kernfs/dir.c | 9 +++++++++ fs/kernfs/kernfs-internal.h | 18 ++++++++++++++++++ fs/sysfs/mount.c | 2 +- include/linux/kernfs.h | 4 ++++ kernel/cgroup/cgroup.c | 4 ++-- 6 files changed, 36 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index b57b3db9a6a7..83f901e2c2df 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3221,13 +3221,13 @@ static int __init rdtgroup_setup_root(void) list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); - ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE); + ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE); if (ret) { kernfs_destroy_root(rdt_root); goto out; } - rdtgroup_default.kn = rdt_root->kn; + rdtgroup_default.kn = kernfs_root_to_node(rdt_root); kernfs_activate(rdtgroup_default.kn); out: diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index e6d9772ddb4c..61a8edc4ba8b 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -970,6 +970,15 @@ void kernfs_destroy_root(struct kernfs_root *root) kernfs_put(root->kn); /* will also free @root */ } +/** + * kernfs_root_to_node - return the kernfs_node associated with a kernfs_root + * @root: root to use to lookup + */ +struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root) +{ + return root->kn; +} + /** * kernfs_create_dir_ns - create a directory * @parent: parent in which to create a new directory diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index fc3b32f82a60..eeaa779b929c 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -31,6 +31,24 @@ struct kernfs_iattrs { atomic_t user_xattr_size; }; +struct kernfs_root { + /* published fields */ + struct kernfs_node *kn; + unsigned int flags; /* KERNFS_ROOT_* flags */ + + /* private fields, do not use outside kernfs proper */ + struct idr ino_idr; + u32 last_id_lowbits; + u32 id_highbits; + struct kernfs_syscall_ops *syscall_ops; + + /* list of kernfs_super_info of this root, protected by kernfs_rwsem */ + struct list_head supers; + + wait_queue_head_t deactivate_waitq; + struct rw_semaphore kernfs_rwsem; +}; + /* +1 to avoid triggering overflow warning when negating it */ #define KN_DEACTIVATED_BIAS (INT_MIN + 1) diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index e747c135c1d1..98467bb76737 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -103,7 +103,7 @@ int __init sysfs_init(void) if (IS_ERR(sysfs_root)) return PTR_ERR(sysfs_root); - sysfs_root_kn = sysfs_root->kn; + sysfs_root_kn = kernfs_root_to_node(sysfs_root); err = register_filesystem(&sysfs_fs_type); if (err) { diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 861c4f0f8a29..62aff082dc3f 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -185,6 +185,7 @@ struct kernfs_syscall_ops { struct kernfs_root *root); }; +#if 0 struct kernfs_root { /* published fields */ struct kernfs_node *kn; @@ -202,6 +203,9 @@ struct kernfs_root { wait_queue_head_t deactivate_waitq; struct rw_semaphore kernfs_rwsem; }; +#endif + +struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root); struct kernfs_open_file { /* published fields */ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index b31e1465868a..a800c3b1b795 100644 --- 
a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1302,7 +1302,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) { - struct cgroup *root_cgrp = kf_root->kn->priv; + struct cgroup *root_cgrp = kernfs_root_to_node(kf_root)->priv; return root_cgrp->root; } @@ -2025,7 +2025,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) ret = PTR_ERR(root->kf_root); goto exit_root_id; } - root_cgrp->kn = root->kf_root->kn; + root_cgrp->kn = kernfs_root_to_node(root->kf_root); WARN_ON_ONCE(cgroup_ino(root_cgrp) != 1); root_cgrp->ancestor_ids[0] = cgroup_id(root_cgrp); -- cgit v1.2.3-71-gd317 From c404c64d64bc31bebe8a2015103671f7cd282731 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Sun, 30 Jan 2022 22:02:06 +0100 Subject: powercap/dtpm: Destroy hierarchy function The hierarchy creation function exists, but without a destroy hierarchy function. Due to that, the modules creating the hierarchy cannot be unloaded properly because they don't have an exit callback. Provide the dtpm_destroy_hierarchy() function to remove the previously created hierarchy. The function relies on all the release mechanisms implemented by the underlying powercap framework. Signed-off-by: Daniel Lezcano Reviewed-by: Ulf Hansson Link: https://lore.kernel.org/r/20220130210210.549877-4-daniel.lezcano@linaro.org --- drivers/powercap/dtpm.c | 43 +++++++++++++++++++++++++++++++++++++++++++ include/linux/dtpm.h | 3 +++ 2 files changed, 46 insertions(+) (limited to 'include/linux') diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c index 7bddd25a6767..d9d74f981118 100644 --- a/drivers/powercap/dtpm.c +++ b/drivers/powercap/dtpm.c @@ -617,3 +617,46 @@ out_unlock: return ret; } EXPORT_SYMBOL_GPL(dtpm_create_hierarchy); + +static void __dtpm_destroy_hierarchy(struct dtpm *dtpm) +{ + struct dtpm *child, *aux; + + list_for_each_entry_safe(child, aux, &dtpm->children, sibling) + __dtpm_destroy_hierarchy(child); + + /* + * At this point, we know all children were removed from the + * recursive call before + */ + dtpm_unregister(dtpm); +} + +void dtpm_destroy_hierarchy(void) +{ + int i; + + mutex_lock(&dtpm_lock); + + if (!pct) + goto out_unlock; + + __dtpm_destroy_hierarchy(root); + + + for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) { + + if (!dtpm_subsys[i]->exit) + continue; + + dtpm_subsys[i]->exit(); + } + + powercap_unregister_control_type(pct); + + pct = NULL; + +out_unlock: + mutex_unlock(&dtpm_lock); +} +EXPORT_SYMBOL_GPL(dtpm_destroy_hierarchy); diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h index f7a25c70dd4c..a4a13514b730 100644 --- a/include/linux/dtpm.h +++ b/include/linux/dtpm.h @@ -37,6 +37,7 @@ struct device_node; struct dtpm_subsys_ops { const char *name; int (*init)(void); + void (*exit)(void); int (*setup)(struct dtpm *, struct device_node *); }; @@ -67,4 +68,6 @@ void dtpm_unregister(struct dtpm *dtpm); int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent); int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table); + +void dtpm_destroy_hierarchy(void); #endif -- cgit v1.2.3-71-gd317 From 43c075959de3c45608636d9d80ff9e61d166fb21 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jan 2022 10:57:07 -0800 Subject: mlx5: remove unused static inlines mlx5 has some unused static inline helpers in include/. While at it, also clean up static inlines in the driver itself.
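Returning to the DTPM teardown a few hunks up: the essential property of __dtpm_destroy_hierarchy() is its post-order walk, so every child is unregistered before its parent (the kernel code uses list_for_each_entry_safe() because entries vanish during the walk). A toy standalone model of that ordering, with a hypothetical node type and arrays standing in for kernel lists:

#include <stdio.h>
#include <stdlib.h>

struct node {
	const char *name;
	struct node *child[4];
	int nr_child;
};

/* post-order: children are destroyed before their parent */
static void destroy(struct node *n)
{
	for (int i = 0; i < n->nr_child; i++)
		destroy(n->child[i]);
	printf("unregister %s\n", n->name);
	free(n);
}

static struct node *mk(const char *name)
{
	struct node *n = calloc(1, sizeof(*n));

	n->name = name;
	return n;
}

int main(void)
{
	struct node *root = mk("root");
	struct node *pkg = mk("package");

	pkg->child[pkg->nr_child++] = mk("cpu");
	root->child[root->nr_child++] = pkg;
	destroy(root); /* prints: cpu, package, root */
	return 0;
}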
Signed-off-by: Jakub Kicinski Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 9 --------- drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h | 7 ------- include/linux/mlx5/driver.h | 10 ---------- 3 files changed, 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index d964665eaa63..62cde3e87c2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -139,15 +139,6 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, return true; } -static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state) -{ -#ifdef CONFIG_MLX5_EN_IPSEC - return mlx5e_ipsec_is_tx_flow(&state->ipsec); -#else - return false; -#endif -} - static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq, struct mlx5e_accel_tx_state *state) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h index 4bad6a5fde56..f240ffe5116c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h @@ -92,13 +92,6 @@ mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca, static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent) { } - -static inline int -mlx5_hv_vhca_write_agent(struct mlx5_hv_vhca_agent *agent, - void *buf, int len) -{ - return 0; -} #endif #endif /* __LIB_HV_VHCA_H__ */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 78655d8d13a7..1b398c9e17b9 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -863,20 +863,10 @@ struct mlx5_hca_vport_context { bool grh_required; }; -static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset) -{ - return buf->frags->buf + offset; -} - #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field -static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) -{ - return pci_get_drvdata(pdev); -} - extern struct dentry *mlx5_debugfs_root; static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) -- cgit v1.2.3-71-gd317 From c2c922dae77f36e24d246c6e310cee0c61afc6fb Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Mon, 29 Nov 2021 16:24:28 +0200 Subject: net/mlx5: Add ability to insert to specific flow group If the flow table isn't an autogroup the upper driver has to create the flow groups explicitly. This information can't later be used when creating rules to insert into a specific flow group. Allow such use case. 
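A hypothetical caller-side sketch of the new knob (not code from this patch): with an explicitly managed table, a driver can pin a rule to the group it created by setting the new fg member of struct mlx5_flow_act before calling mlx5_add_flow_rules():

/* sketch; assumes 'ft' is a non-autogroup table and 'fg' was
 * created earlier with mlx5_create_flow_group() */
static struct mlx5_flow_handle *
add_rule_in_group(struct mlx5_flow_table *ft, struct mlx5_flow_group *fg,
		  struct mlx5_flow_spec *spec,
		  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.fg = fg,	/* restrict matching to this group */
	};

	/* with .fg set, build_match_list() skips every other group,
	 * and autogroup tables reject the request with -EINVAL */
	return mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
}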
Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 9 ++++++++- include/linux/mlx5/fs.h | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index b628917e38e4..ebb7960ec62b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1696,6 +1696,7 @@ static void free_match_list(struct match_list *head, bool ft_locked) static int build_match_list(struct match_list *match_head, struct mlx5_flow_table *ft, const struct mlx5_flow_spec *spec, + struct mlx5_flow_group *fg, bool ft_locked) { struct rhlist_head *tmp, *list; @@ -1710,6 +1711,9 @@ static int build_match_list(struct match_list *match_head, rhl_for_each_entry_rcu(g, tmp, list, hash) { struct match_list *curr_match; + if (fg && fg != g) + continue; + if (unlikely(!tree_get_node(&g->node))) continue; @@ -1889,6 +1893,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, if (!check_valid_spec(spec)) return ERR_PTR(-EINVAL); + if (flow_act->fg && ft->autogroup.active) + return ERR_PTR(-EINVAL); + for (i = 0; i < dest_num; i++) { if (!dest_is_valid(&dest[i], flow_act, ft)) return ERR_PTR(-EINVAL); @@ -1898,7 +1905,7 @@ search_again_locked: version = atomic_read(&ft->node.version); /* Collect all fgs which has a matching match_criteria */ - err = build_match_list(&match_head, ft, spec, take_write); + err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write); if (err) { if (take_write) up_write_ref_node(&ft->node, false); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b1aad14689e3..e3bfed68b08a 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -224,6 +224,7 @@ struct mlx5_flow_act { u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; + struct mlx5_flow_group *fg; }; #define MLX5_DECLARE_FLOW_ACT(name) \ -- cgit v1.2.3-71-gd317 From 605bef0015b163867127202b821dce79804d603d Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Sun, 5 Apr 2020 17:50:25 -0700 Subject: net/mlx5: cmdif, cmd_check refactoring Do not mangle the command outbox in the internal low level cmd_exec and cmd_invoke functions. Instead return a proper unique error code and move the driver error checking to be at a higher level in mlx5_cmd_exec(). 
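The resulting error funnel is easier to see in a loose, self-contained model (toy status values; the real status-to-errno table lives in cmd_status_to_err()):

#include <errno.h>
#include <stdio.h>

#define STAT_OK      0x00
#define STAT_INT_ERR 0x01	/* toy "internal error" FW status */

static int cmd_status_to_err(unsigned char status)
{
	return status == STAT_OK ? 0 : -EIO; /* real table is richer */
}

/* loose model of the reworked mlx5_cmd_check() */
static int check(int err, unsigned char *outbox_status)
{
	if (err == -ENXIO) {			/* PCI error or reset flow */
		*outbox_status = STAT_INT_ERR;	/* emulate a FW status */
		err = 0;			/* fall through to outbox check */
	}
	if (err)
		return err;			/* driver or FW delivery error */
	return cmd_status_to_err(*outbox_status);
}

int main(void)
{
	unsigned char st = STAT_OK;

	printf("fw ok:    %d\n", check(0, &st));	/* 0 */
	st = STAT_INT_ERR;
	printf("fw bad:   %d\n", check(0, &st));	/* -EIO */
	printf("pci gone: %d\n", check(-ENXIO, &st));	/* emulated status */
	return 0;
}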
Signed-off-by: Saeed Mahameed Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 173 +++++++++++++------------ drivers/net/ethernet/mellanox/mlx5/core/main.c | 5 +- include/linux/mlx5/driver.h | 1 - 3 files changed, 95 insertions(+), 84 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3c6a533ee0c9..7ff01b901f53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -760,43 +760,61 @@ struct mlx5_ifc_mbox_in_bits { u8 reserved_at_40[0x40]; }; -void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome) -{ - *status = MLX5_GET(mbox_out, out, status); - *syndrome = MLX5_GET(mbox_out, out, syndrome); -} - -static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) +static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) { + u16 opcode, op_mod; u32 syndrome; u8 status; - u16 opcode; - u16 op_mod; u16 uid; + int err; - mlx5_cmd_mbox_status(out, &status, &syndrome); - if (!status) - return 0; + syndrome = MLX5_GET(mbox_out, out, syndrome); + status = MLX5_GET(mbox_out, out, status); opcode = MLX5_GET(mbox_in, in, opcode); op_mod = MLX5_GET(mbox_in, in, op_mod); uid = MLX5_GET(mbox_in, in, uid); + err = cmd_status_to_err(status); + if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY) mlx5_core_err_rl(dev, - "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", + "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", mlx5_command_str(opcode), opcode, op_mod, - cmd_status_str(status), status, syndrome); + cmd_status_str(status), status, syndrome, err); else mlx5_core_dbg(dev, - "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", - mlx5_command_str(opcode), - opcode, op_mod, - cmd_status_str(status), - status, - syndrome); + "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", + mlx5_command_str(opcode), opcode, op_mod, uid, + cmd_status_str(status), status, syndrome, err); +} + +static int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) +{ + /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */ + if (err == -ENXIO) { + u16 opcode = MLX5_GET(mbox_in, in, opcode); + u32 syndrome; + u8 status; + + /* PCI Error, emulate command return status, for smooth reset */ + err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status); + MLX5_SET(mbox_out, out, status, status); + MLX5_SET(mbox_out, out, syndrome, syndrome); + if (!err) + return 0; + } + + /* driver or FW delivery error */ + if (err) + return err; - return cmd_status_to_err(status); + /* check outbox status */ + err = cmd_status_to_err(MLX5_GET(mbox_out, out, status)); + if (err) + cmd_status_print(dev, in, out); + + return err; } static void dump_command(struct mlx5_core_dev *dev, @@ -980,13 +998,7 @@ static void cmd_work_handler(struct work_struct *work) /* Skip sending command to fw if internal error */ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) { - u8 status = 0; - u32 drv_synd; - - ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status); - MLX5_SET(mbox_out, ent->out, status, status); - MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); - + ent->ret = -ENXIO; mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); return; } @@ -1005,6 +1017,31 @@ static void cmd_work_handler(struct work_struct *work) } } 
+static int deliv_status_to_err(u8 status) +{ + switch (status) { + case MLX5_CMD_DELIVERY_STAT_OK: + case MLX5_DRIVER_STATUS_ABORTED: + return 0; + case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: + case MLX5_CMD_DELIVERY_STAT_TOK_ERR: + return -EBADR; + case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: + case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: + case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: + return -EFAULT; /* Bad address */ + case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: + case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: + case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: + case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: + return -ENOMSG; + case MLX5_CMD_DELIVERY_STAT_FW_ERR: + return -EIO; + default: + return -EINVAL; + } +} + static const char *deliv_status_to_str(u8 status) { switch (status) { @@ -1622,15 +1659,15 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force ent->ts2 = ktime_get_ns(); memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); dump_command(dev, ent, 0); - if (!ent->ret) { + + if (vec & MLX5_TRIGGERED_CMD_COMP) + ent->ret = -ENXIO; + + if (!ent->ret) { /* Command completed by FW */ if (!cmd->checksum_disabled) ent->ret = verify_signature(ent); - else - ent->ret = 0; - if (vec & MLX5_TRIGGERED_CMD_COMP) - ent->status = MLX5_DRIVER_STATUS_ABORTED; - else - ent->status = ent->lay->status_own >> 1; + + ent->status = ent->lay->status_own >> 1; mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", ent->ret, deliv_status_to_str(ent->status), ent->status); @@ -1649,14 +1686,13 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force callback = ent->callback; context = ent->context; err = ent->ret; - if (!err) { + if (!err && !ent->status) { err = mlx5_copy_from_msg(ent->uout, ent->out, ent->uout_size); - err = err ? err : mlx5_cmd_check(dev, - ent->in->first.data, - ent->uout); + err = mlx5_cmd_check(dev, err, ent->in->first.data, + ent->uout); } mlx5_free_cmd_msg(dev, ent->out); @@ -1729,31 +1765,6 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev) up(&cmd->sem); } -static int deliv_status_to_err(u8 status) -{ - switch (status) { - case MLX5_CMD_DELIVERY_STAT_OK: - case MLX5_DRIVER_STATUS_ABORTED: - return 0; - case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: - case MLX5_CMD_DELIVERY_STAT_TOK_ERR: - return -EBADR; - case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: - case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: - case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: - return -EFAULT; /* Bad address */ - case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: - case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: - case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: - case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: - return -ENOMSG; - case MLX5_CMD_DELIVERY_STAT_FW_ERR: - return -EIO; - default: - return -EINVAL; - } -} - static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, gfp_t gfp) { @@ -1812,15 +1823,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, u8 token; int err; - if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) { - u32 drv_synd; - u8 status; - - err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); - MLX5_SET(mbox_out, out, status, status); - MLX5_SET(mbox_out, out, syndrome, drv_synd); - return err; - } + if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) + return -ENXIO; pages_queue = is_manage_pages(in); gfp = callback ? 
GFP_ATOMIC : GFP_KERNEL; @@ -1865,13 +1869,24 @@ out_in: return err; } +/** + * mlx5_cmd_exec - Executes a fw command, wait for completion + * + * @dev: mlx5 core device + * @in: inbox mlx5_ifc command buffer + * @in_size: inbox buffer size + * @out: outbox mlx5_ifc buffer + * @out_size: outbox size + * + * @return: 0 if no error, FW command execution was successful, + * and outbox status is ok. + */ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { - int err; + int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); - err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); - return err ? : mlx5_cmd_check(dev, in, out); + return mlx5_cmd_check(dev, err, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec); @@ -1932,11 +1947,9 @@ EXPORT_SYMBOL(mlx5_cmd_exec_cb); int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { - int err; - - err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); + int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); - return err ? : mlx5_cmd_check(dev, in, out); + return mlx5_cmd_check(dev, err, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec_polling); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2c774f367199..cea1a8ac196e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -736,10 +736,9 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out); if (err) { - u32 syndrome; - u8 status; + u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome); + u8 status = MLX5_GET(query_issi_out, query_out, status); - mlx5_cmd_mbox_status(query_out, &status, &syndrome); if (!status || syndrome == MLX5_DRIVER_SYND) { mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n", err, status, syndrome); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1b398c9e17b9..8a8408708e6c 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -981,7 +981,6 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); -void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); -- cgit v1.2.3-71-gd317 From f23519e542e51c19ab3081deb089bb3f8fec7bb9 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Sat, 17 Aug 2019 03:05:10 -0700 Subject: net/mlx5: cmdif, Add new api for command execution Add mlx5_cmd_do. Unlike mlx5_cmd_exec, this function will not modify or translate outbox.status. The function will return: return = 0: Command was executed, outbox.status == MLX5_CMD_STAT_OK. return = -EREMOTEIO: Executed, outbox.status != MLX5_CMD_STAT_OK. return < 0: Command execution couldn't be performed by FW or driver. And document other mlx5_cmd_exec functions. 
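As an illustration, a hypothetical caller of the new API would follow this contract (sketch only, not part of the patch):

    err = mlx5_cmd_do(dev, in, sizeof(in), out, sizeof(out));
    if (err == -EREMOTEIO) {
            /* FW executed the command but flagged a failure; the raw
             * outbox.{status,syndrome} in 'out' are valid and can be
             * parsed or propagated as-is.
             */
    } else if (err) {
            /* FW or driver delivery error; outbox contents are not valid */
    } else {
            /* success, outbox.status == MLX5_CMD_STAT_OK */
    }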
Signed-off-by: Saeed Mahameed Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 79 ++++++++++++++++++++++----- include/linux/mlx5/driver.h | 2 + 2 files changed, 68 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 7ff01b901f53..a2f87a686a18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -789,7 +789,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) cmd_status_str(status), status, syndrome, err); } -static int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) +int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) { /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */ if (err == -ENXIO) { @@ -806,7 +806,7 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *ou } /* driver or FW delivery error */ - if (err) + if (err != -EREMOTEIO && err) return err; /* check outbox status */ @@ -816,6 +816,7 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *ou return err; } +EXPORT_SYMBOL(mlx5_cmd_check); static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent, int input) @@ -1869,6 +1870,38 @@ out_in: return err; } +/** + * mlx5_cmd_do - Executes a fw command, wait for completion. + * Unlike mlx5_cmd_exec, this function will not translate or intercept + * outbox.status and will return -EREMOTEIO when + * outbox.status != MLX5_CMD_STAT_OK + * + * @dev: mlx5 core device + * @in: inbox mlx5_ifc command buffer + * @in_size: inbox buffer size + * @out: outbox mlx5_ifc buffer + * @out_size: outbox size + * + * @return: + * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK. + * Caller must check FW outbox status. + * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK. + * < 0 : Command execution couldn't be performed by firmware or driver + */ +int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) +{ + int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); + + if (err) /* -EREMOTEIO is preserved */ + return err == -EREMOTEIO ? -EIO : err; + + if (MLX5_GET(mbox_out, out, status) != MLX5_CMD_STAT_OK) + return -EREMOTEIO; + + return 0; +} +EXPORT_SYMBOL(mlx5_cmd_do); + /** * mlx5_cmd_exec - Executes a fw command, wait for completion * @@ -1878,18 +1911,47 @@ out_in: * @out: outbox mlx5_ifc buffer * @out_size: outbox size * - * @return: 0 if no error, FW command execution was successful, + * @return: 0 if no error, FW command execution was successful * and outbox status is ok. 
*/ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { - int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); + int err = mlx5_cmd_do(dev, in, in_size, out, out_size); return mlx5_cmd_check(dev, err, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec); +/** + * mlx5_cmd_exec_polling - Executes a fw command, poll for completion + * Needed for driver force teardown, when command completion EQ + * will not be available to complete the command + * + * @dev: mlx5 core device + * @in: inbox mlx5_ifc command buffer + * @in_size: inbox buffer size + * @out: outbox mlx5_ifc buffer + * @out_size: outbox size + * + * @return: 0 if no error, FW command execution was successful + * and outbox status is ok. + */ +int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size) +{ + int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); + + if (err) /* -EREMOTEIO is preserved */ + return err == -EREMOTEIO ? -EIO : err; + + if (MLX5_GET(mbox_out, out, status) != MLX5_CMD_STAT_OK) + err = -EREMOTEIO; + + return mlx5_cmd_check(dev, err, in, out); +} +EXPORT_SYMBOL(mlx5_cmd_exec_polling); + void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, struct mlx5_async_ctx *ctx) { @@ -1944,15 +2006,6 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, } EXPORT_SYMBOL(mlx5_cmd_exec_cb); -int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, - void *out, int out_size) -{ - int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); - - return mlx5_cmd_check(dev, err, in, out); -} -EXPORT_SYMBOL(mlx5_cmd_exec_polling); - static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct cmd_msg_cache *ch; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 8a8408708e6c..1b9bec8fa870 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -964,6 +964,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, void *out, int out_size, mlx5_async_cbk_t callback, struct mlx5_async_work *work); +int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); +int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); -- cgit v1.2.3-71-gd317 From 31803e59233efc838b9dcb26edea28a4b2389e97 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 30 Mar 2020 21:12:58 -0700 Subject: net/mlx5: Use mlx5_cmd_do() in core create_{cq,dct} mlx5_core_create_{cq/dct} functions are non-trivial mlx5 commands functions. They check command execution status themselves and hide valuable FW failure information. For mlx5_core/eth kernel user this is what we actually want, but for a devx/rdma user the hidden information is essential and should be propagated up to the caller, thus we convert these commands to use mlx5_cmd_do to return the FW/driver and command outbox status as is, and let the caller decide what to do with it. For kernel callers of mlx5_core_create_{cq/dct} or those who only care about the binary status (FAIL/SUCCESS) they must check status themselves via mlx5_cmd_check() to restore the current behavior. 
err = mlx5_create_cq(in, out) err = mlx5_cmd_check(err, in, out) if (err) // handle err For DEVX users and those who care about full visibility, They will just propagate the error to user space, and app can check if err == -EREMOTEIO, then outbox.{status,syndrome} are valid. API Note: mlx5_cmd_check() must be used by kernel users since it allows the driver to intercept the command execution status and return a driver simulated status in case of driver induced error handling or reset/recovery flows. Signed-off-by: Saeed Mahameed Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/devx.c | 6 +++--- drivers/infiniband/hw/mlx5/qp.c | 1 + drivers/infiniband/hw/mlx5/qpc.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 17 ++++++++++++++--- include/linux/mlx5/cq.h | 2 ++ 5 files changed, 21 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 08b7f6bc56c3..1f62c0ede048 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1497,9 +1497,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( !is_apu_cq(dev, cmd_in)) { obj->flags |= DEVX_OBJ_FLAGS_CQ; obj->core_cq.comp = devx_cq_comp; - err = mlx5_core_create_cq(dev->mdev, &obj->core_cq, - cmd_in, cmd_in_len, cmd_out, - cmd_out_len); + err = mlx5_create_cq(dev->mdev, &obj->core_cq, + cmd_in, cmd_in_len, cmd_out, + cmd_out_len); } else { err = mlx5_cmd_exec(dev->mdev, cmd_in, cmd_in_len, diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 29475cf8c7c3..b7fe47107d76 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4465,6 +4465,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in, MLX5_ST_SZ_BYTES(create_dct_in), out, sizeof(out)); + err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out); if (err) return err; resp.dctn = qp->dct.mdct.mqp.qpn; diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index 8844eacf2380..542e4c63a8de 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -220,7 +220,7 @@ int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct, init_completion(&dct->drained); MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT); - err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen); + err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 5371ad0a12eb..15a74966be7d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -86,8 +86,9 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, spin_unlock_irqrestore(&tasklet_ctx->lock, flags); } -int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *in, int inlen, u32 *out, int outlen) +/* Callers must verify outbox status in case of err */ +int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u32 *in, int inlen, u32 *out, int outlen) { int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn_or_apu_element); @@ -101,7 +102,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, memset(out, 0, outlen); MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); - err = mlx5_cmd_exec(dev, in, inlen, out, outlen); + err = 
mlx5_cmd_do(dev, in, inlen, out, outlen); if (err) return err; @@ -148,6 +149,16 @@ err_cmd: mlx5_cmd_exec_in(dev, destroy_cq, din); return err; } +EXPORT_SYMBOL(mlx5_create_cq); + +/* oubox is checked and err val is normalized */ +int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u32 *in, int inlen, u32 *out, int outlen) +{ + int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen); + + return mlx5_cmd_check(dev, err, in, out); +} EXPORT_SYMBOL(mlx5_core_create_cq); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 7bfb67363434..cb15308b5cb0 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -183,6 +183,8 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq) complete(&cq->free); } +int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); -- cgit v1.2.3-71-gd317 From 0a41527608e7f3da61e76564f5a8749a1fddc7f1 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 31 Mar 2020 22:03:00 -0700 Subject: net/mlx5: cmdif, Refactor error handling and reporting of async commands Same as the new mlx5_cmd_do API, report all information to callers and let them handle the error values and outbox parsing. The user callback status "work->user_callback(status)" is now similar to the error rc code returned from the blocking mlx5_cmd_do() version, and now is defined as follows: -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK. Caller must check FW outbox status. 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK. < 0 : Command couldn't execute, FW or driver induced error. Signed-off-by: Saeed Mahameed Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/mr.c | 15 ++++++- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 61 ++++++++++++++++----------- include/linux/mlx5/driver.h | 3 +- 3 files changed, 52 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 157d862fb864..06e4b8cea6bd 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -140,6 +140,19 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); } +static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out) +{ + if (status == -ENXIO) /* core driver is not available */ + return; + + mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); + if (status != -EREMOTEIO) /* driver specific failure */ + return; + + /* Failed in FW, print cmd out failure details */ + mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out); +} + static void create_mkey_callback(int status, struct mlx5_async_work *context) { struct mlx5_ib_mr *mr = @@ -149,7 +162,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) unsigned long flags; if (status) { - mlx5_ib_warn(dev, "async reg mr failed. 
status %d\n", status); + create_mkey_warn(dev, status, mr->out); kfree(mr); spin_lock_irqsave(&ent->lock, flags); ent->pending--; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index a2f87a686a18..823d5808d5a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -760,6 +760,18 @@ struct mlx5_ifc_mbox_in_bits { u8 reserved_at_40[0x40]; }; +void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) +{ + u32 syndrome = MLX5_GET(mbox_out, out, syndrome); + u8 status = MLX5_GET(mbox_out, out, status); + + mlx5_core_err_rl(dev, + "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", + mlx5_command_str(opcode), opcode, op_mod, + cmd_status_str(status), status, syndrome, cmd_status_to_err(status)); +} +EXPORT_SYMBOL(mlx5_cmd_out_err); + static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) { u16 opcode, op_mod; @@ -778,10 +790,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) err = cmd_status_to_err(status); if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY) - mlx5_core_err_rl(dev, - "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", - mlx5_command_str(opcode), opcode, op_mod, - cmd_status_str(status), status, syndrome, err); + mlx5_cmd_out_err(dev, opcode, op_mod, out); else mlx5_core_dbg(dev, "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", @@ -1686,20 +1695,18 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force callback = ent->callback; context = ent->context; - err = ent->ret; - if (!err && !ent->status) { + err = ent->ret ? : ent->status; + if (err > 0) /* Failed in FW, command didn't execute */ + err = deliv_status_to_err(err); + + if (!err) err = mlx5_copy_from_msg(ent->uout, ent->out, ent->uout_size); - err = mlx5_cmd_check(dev, err, ent->in->first.data, - ent->uout); - } - mlx5_free_cmd_msg(dev, ent->out); free_msg(dev, ent->in); - err = err ? err : ent->status; /* final consumer is done, release ent */ cmd_ent_put(ent); callback(err, context); @@ -1870,6 +1877,18 @@ out_in: return err; } +/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ +static int cmd_status_err(int err, void *out) +{ + if (err) /* -EREMOTEIO is preserved */ + return err == -EREMOTEIO ? -EIO : err; + + if (MLX5_GET(mbox_out, out, status) != MLX5_CMD_STAT_OK) + return -EREMOTEIO; + + return 0; +} + /** * mlx5_cmd_do - Executes a fw command, wait for completion. * Unlike mlx5_cmd_exec, this function will not translate or intercept @@ -1892,13 +1911,7 @@ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); - if (err) /* -EREMOTEIO is preserved */ - return err == -EREMOTEIO ? -EIO : err; - - if (MLX5_GET(mbox_out, out, status) != MLX5_CMD_STAT_OK) - return -EREMOTEIO; - - return 0; + return cmd_status_err(err, out); } EXPORT_SYMBOL(mlx5_cmd_do); @@ -1942,12 +1955,7 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); - if (err) /* -EREMOTEIO is preserved */ - return err == -EREMOTEIO ? 
-EIO : err; - - if (MLX5_GET(mbox_out, out, status) != MLX5_CMD_STAT_OK) - err = -EREMOTEIO; - + err = cmd_status_err(err, out); return mlx5_cmd_check(dev, err, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec_polling); @@ -1980,8 +1988,10 @@ EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); static void mlx5_cmd_exec_cb_handler(int status, void *_work) { struct mlx5_async_work *work = _work; - struct mlx5_async_ctx *ctx = work->ctx; + struct mlx5_async_ctx *ctx; + ctx = work->ctx; + status = cmd_status_err(status, work->out); work->user_callback(status, work); if (atomic_dec_and_test(&ctx->num_inflight)) wake_up(&ctx->wait); @@ -1995,6 +2005,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, work->ctx = ctx; work->user_callback = callback; + work->out = out; if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) return -EIO; ret = cmd_exec(ctx->dev, in, in_size, out, out_size, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1b9bec8fa870..432151d7d0bf 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -955,6 +955,7 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); struct mlx5_async_work { struct mlx5_async_ctx *ctx; mlx5_async_cbk_t user_callback; + void *out; /* pointer to the cmd output buffer */ }; void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, @@ -963,7 +964,7 @@ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx); int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, void *out, int out_size, mlx5_async_cbk_t callback, struct mlx5_async_work *work); - +void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out); int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, -- cgit v1.2.3-71-gd317 From 72fb3b60a3114a1154a8ae5629ea3b43a88a7a4d Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Sat, 14 Aug 2021 17:57:51 +0300 Subject: net/mlx5: Add reset_state field to MFRL register Add new field reset_state to MFRL register. This field expose current state of sync reset for fw update. This field enables sharing with the user more details on why fw activate failed in case it failed the sync reset stage. 
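For illustration, a consumer of the register could decode the new field roughly as follows (sketch; a follow-up patch in this series adds such a query):

    u8 reset_state = MLX5_GET(mfrl_reg, out, reset_state);

    switch (reset_state) {
    case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
            /* sync reset timed out */
            break;
    case MLX5_MFRL_REG_RESET_STATE_NACK:
            /* one of the hosts refused the reset */
            break;
    }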
Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 598ac3bcc901..8ca2d65ff789 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9694,7 +9694,8 @@ struct mlx5_ifc_pcam_reg_bits { }; struct mlx5_ifc_mcam_enhanced_features_bits { - u8 reserved_at_0[0x6b]; + u8 reserved_at_0[0x6a]; + u8 reset_state[0x1]; u8 ptpcyc2realtime_modify[0x1]; u8 reserved_at_6c[0x2]; u8 pci_status_and_power[0x1]; @@ -10375,6 +10376,14 @@ struct mlx5_ifc_mcda_reg_bits { u8 data[][0x20]; }; +enum { + MLX5_MFRL_REG_RESET_STATE_IDLE = 0, + MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1, + MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2, + MLX5_MFRL_REG_RESET_STATE_TIMEOUT = 3, + MLX5_MFRL_REG_RESET_STATE_NACK = 4, +}; + enum { MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0), MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1), @@ -10393,7 +10402,8 @@ struct mlx5_ifc_mfrl_reg_bits { u8 pci_sync_for_fw_update_start[0x1]; u8 pci_sync_for_fw_update_resp[0x2]; u8 rst_type_sel[0x3]; - u8 reserved_at_28[0x8]; + u8 reserved_at_28[0x4]; + u8 reset_state[0x4]; u8 reset_type[0x8]; u8 reset_level[0x8]; }; -- cgit v1.2.3-71-gd317 From 45fee8edb4b333af79efad7a99de51718ebda94b Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Mon, 6 Sep 2021 11:02:44 +0300 Subject: net/mlx5: Add clarification on sync reset failure In case devlink reload action fw_activate failed in sync reset stage, use the new MFRL field reset_state to find why it failed and share this clarification with the user. Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 10 ++-- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 57 +++++++++++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h | 3 +- drivers/net/ethernet/mellanox/mlx5/core/port.c | 20 ++++++-- include/linux/mlx5/driver.h | 3 ++ 5 files changed, 74 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index d1093bb2d436..057dde6f4417 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -100,15 +100,11 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli } net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE); - err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive); + err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack); if (err) - goto out; + return err; - err = mlx5_fw_reset_wait_reset_done(dev); -out: - if (err) - NL_SET_ERR_MSG_MOD(extack, "FW activate command failed"); - return err; + return mlx5_fw_reset_wait_reset_done(dev); } static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 0b0234f9d694..d438d7a61500 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -57,7 +57,8 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level, return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1); } -static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type) +static 
int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, + u8 *reset_type, u8 *reset_state) { u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {}; u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {}; @@ -71,25 +72,67 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *r *reset_level = MLX5_GET(mfrl_reg, out, reset_level); if (reset_type) *reset_type = MLX5_GET(mfrl_reg, out, reset_type); + if (reset_state) + *reset_state = MLX5_GET(mfrl_reg, out, reset_state); return 0; } int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type) { - return mlx5_reg_mfrl_query(dev, reset_level, reset_type); + return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL); } -int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel) +static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev, + struct netlink_ext_ack *extack) +{ + u8 reset_state; + + if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state)) + goto out; + + switch (reset_state) { + case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION: + case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS: + NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered"); + return -EBUSY; + case MLX5_MFRL_REG_RESET_STATE_TIMEOUT: + NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout"); + return -ETIMEDOUT; + case MLX5_MFRL_REG_RESET_STATE_NACK: + NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset"); + return -EPERM; + } + +out: + NL_SET_ERR_MSG_MOD(extack, "Sync reset failed"); + return -EIO; +} + +int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, + struct netlink_ext_ack *extack) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {}; int err; set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); - err = mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, reset_type_sel, 0, true); - if (err) - clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); - return err; + + MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3); + MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel); + MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1); + err = mlx5_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MFRL, 0, 1, false); + if (!err) + return 0; + + clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); + if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state)) + return mlx5_fw_reset_get_reset_state_err(dev, extack); + + NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed"); + return mlx5_cmd_check(dev, err, in, out); } int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h index 7761ee5fc7d0..694fc7cb2684 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h @@ -9,7 +9,8 @@ void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable); bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev); int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type); -int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel); +int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, + struct netlink_ext_ack *extack); int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev); int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev); diff 
--git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 1ef2b6a848c1..d15b417d3e07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -33,9 +33,10 @@ #include #include "mlx5_core.h" -int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, - int size_in, void *data_out, int size_out, - u16 reg_id, int arg, int write) +/* calling with verbose false will not print error to log */ +int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, + void *data_out, int size_out, u16 reg_id, int arg, + int write, bool verbose) { int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out; int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in; @@ -57,7 +58,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, MLX5_SET(access_register_in, in, argument, arg); MLX5_SET(access_register_in, in, register_id, reg_id); - err = mlx5_cmd_exec(dev, in, inlen, out, outlen); + err = mlx5_cmd_do(dev, in, inlen, out, outlen); + if (verbose) + err = mlx5_cmd_check(dev, err, in, out); if (err) goto out; @@ -69,6 +72,15 @@ out: kvfree(in); return err; } +EXPORT_SYMBOL_GPL(mlx5_access_reg); + +int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_id, int arg, int write) +{ + return mlx5_access_reg(dev, data_in, size_in, data_out, size_out, + reg_id, arg, write, true); +} EXPORT_SYMBOL_GPL(mlx5_core_access_reg); int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 432151d7d0bf..d3b1a6a1f8d2 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1031,6 +1031,9 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, + void *data_out, int size_out, u16 reg_id, int arg, + int write, bool verbose); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -- cgit v1.2.3-71-gd317 From 1241e329ce2e1f5b1039fd356b75867b29721ad2 Mon Sep 17 00:00:00 2001 From: Subbaraya Sundeep Date: Wed, 23 Feb 2022 00:09:12 +0530 Subject: ethtool: add support to set/get completion queue event size Add support to set completion queue event size via ethtool -G parameter and get it via ethtool -g parameter. 
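A driver opting in advertises the new capability bit and honors the requested size in its .set_ringparam() callback, roughly as below (sketch with a hypothetical driver; the my_* names are illustrative), after which the parameter is usable from the command line as in the session that follows:

    static const struct ethtool_ops my_ethtool_ops = {
            .supported_ring_params = ETHTOOL_RING_USE_CQE_SIZE,
            .get_ringparam         = my_get_ringparam,
            .set_ringparam         = my_set_ringparam,
    };

    static int my_set_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ring,
                                struct kernel_ethtool_ringparam *kring,
                                struct netlink_ext_ack *extack)
    {
            /* resize the hardware completion queues accordingly */
            return my_reconfigure_cq(netdev_priv(dev), kring->cqe_size);
    }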
~ # ./ethtool -G eth0 cqe-size 512 ~ # ./ethtool -g eth0 Ring parameters for eth0: Pre-set maximums: RX: 1048576 RX Mini: n/a RX Jumbo: n/a TX: 1048576 Current hardware settings: RX: 256 RX Mini: n/a RX Jumbo: n/a TX: 4096 RX Buf Len: 2048 CQE Size: 128 Signed-off-by: Subbaraya Sundeep Signed-off-by: Sunil Goutham Signed-off-by: Jakub Kicinski --- Documentation/networking/ethtool-netlink.rst | 11 +++++++++++ include/linux/ethtool.h | 4 ++++ include/uapi/linux/ethtool_netlink.h | 1 + net/ethtool/netlink.h | 2 +- net/ethtool/rings.c | 19 +++++++++++++++++-- 5 files changed, 34 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index cae28af7a476..24d9be69065d 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -861,6 +861,7 @@ Kernel response contents: ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split + ``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE ==================================== ====== =========================== ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with @@ -885,6 +886,7 @@ Request contents: ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring + ``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE ==================================== ====== =========================== Kernel checks that requested ring sizes do not exceed limits reported by @@ -892,6 +894,15 @@ driver. Driver may impose additional constraints and may not suspport all attributes. +``ETHTOOL_A_RINGS_CQE_SIZE`` specifies the completion queue event size. +Completion queue events(CQE) are the events posted by NIC to indicate the +completion status of a packet when the packet is sent(like send success or +error) or received(like pointers to packet fragments). The CQE size parameter +enables to modify the CQE size other than default size if NIC supports it. +A bigger CQE can have more receive buffer pointers inturn NIC can transfer +a bigger frame from wire. Based on the NIC hardware, the overall completion +queue size can be adjusted in the driver if CQE size is modified. + CHANNELS_GET ============ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e0853f48b75e..4af58459a1e7 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -71,18 +71,22 @@ enum { * struct kernel_ethtool_ringparam - RX/TX ring configuration * @rx_buf_len: Current length of buffers on the rx ring. 
* @tcp_data_split: Scatter packet headers and data to separate buffers + * @cqe_size: Size of TX/RX completion queue event */ struct kernel_ethtool_ringparam { u32 rx_buf_len; u8 tcp_data_split; + u32 cqe_size; }; /** * enum ethtool_supported_ring_param - indicator caps for setting ring params * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len + * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size */ enum ethtool_supported_ring_param { ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0), + ETHTOOL_RING_USE_CQE_SIZE = BIT(1), }; #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 417d4280d7b5..979850221b8d 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -337,6 +337,7 @@ enum { ETHTOOL_A_RINGS_TX, /* u32 */ ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */ ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */ + ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */ /* add new constants above here */ __ETHTOOL_A_RINGS_CNT, diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 75856db299e9..29d01662a48b 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -363,7 +363,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1]; extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1]; extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1]; -extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_BUF_LEN + 1]; +extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_CQE_SIZE + 1]; extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1]; extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1]; extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1]; diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index 18a5035d3bee..9f33c9689b56 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -54,7 +54,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base, nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */ nla_total_size(sizeof(u32)) + /* _RINGS_TX */ nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */ - nla_total_size(sizeof(u8)); /* _RINGS_TCP_DATA_SPLIT */ + nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */ + nla_total_size(sizeof(u32)); /* _RINGS_CQE_SIZE */ } static int rings_fill_reply(struct sk_buff *skb, @@ -91,7 +92,9 @@ static int rings_fill_reply(struct sk_buff *skb, (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) || (kr->tcp_data_split && (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT, - kr->tcp_data_split)))) + kr->tcp_data_split))) || + (kr->cqe_size && + (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size)))) return -EMSGSIZE; return 0; @@ -119,6 +122,7 @@ const struct nla_policy ethnl_rings_set_policy[] = { [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 }, [ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 }, [ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1), + [ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1), }; int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) @@ -159,6 +163,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod); ethnl_update_u32(&kernel_ringparam.rx_buf_len, 
tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod); + ethnl_update_u32(&kernel_ringparam.cqe_size, + tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod); ret = 0; if (!mod) goto out_ops; @@ -190,6 +196,15 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) goto out_ops; } + if (kernel_ringparam.cqe_size && + !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) { + ret = -EOPNOTSUPP; + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_CQE_SIZE], + "setting cqe size not supported"); + goto out_ops; + } + ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, &kernel_ringparam, info->extack); if (ret < 0) -- cgit v1.2.3-71-gd317 From 5597f082fcaf17e2d9adee7a1f6fa02f5872f9d5 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Thu, 13 Jan 2022 15:34:07 +0100 Subject: can: bittiming: mark function arguments and local variables as const This patch marks the arguments of some functions as well as some local variables as constant. Link: https://lore.kernel.org/all/20220124215642.3474154-7-mkl@pengutronix.de Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev/bittiming.c | 12 ++++++------ include/linux/can/bittiming.h | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c index 1b1d1499e2f1..2103bcca9012 100644 --- a/drivers/net/can/dev/bittiming.c +++ b/drivers/net/can/dev/bittiming.c @@ -24,7 +24,7 @@ */ static int can_update_sample_point(const struct can_bittiming_const *btc, - unsigned int sample_point_nominal, unsigned int tseg, + const unsigned int sample_point_nominal, const unsigned int tseg, unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, unsigned int *sample_point_error_ptr) { @@ -63,7 +63,7 @@ can_update_sample_point(const struct can_bittiming_const *btc, return best_sample_point; } -int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, +int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { struct can_priv *priv = netdev_priv(dev); @@ -208,10 +208,10 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, * prescaler value brp. You can find more information in the header * file linux/can/netlink.h. 
*/ -static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, +static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { - struct can_priv *priv = netdev_priv(dev); + const struct can_priv *priv = netdev_priv(dev); unsigned int tseg1, alltseg; u64 brp64; @@ -244,7 +244,7 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, /* Checks the validity of predefined bitrate settings */ static int -can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, +can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt, const u32 *bitrate_const, const unsigned int bitrate_const_cnt) { @@ -258,7 +258,7 @@ can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, return -EINVAL; } -int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, +int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc, const u32 *bitrate_const, const unsigned int bitrate_const_cnt) diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index a81652d1c6f3..7ae21c0f7f23 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -113,7 +113,7 @@ struct can_tdc_const { }; #ifdef CONFIG_CAN_CALC_BITTIMING -int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, +int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc); void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, @@ -121,7 +121,7 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, u32 *ctrlmode, u32 ctrlmode_supported); #else /* !CONFIG_CAN_CALC_BITTIMING */ static inline int -can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, +can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { netdev_err(dev, "bit-timing calculation not available\n"); @@ -136,7 +136,7 @@ can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, } #endif /* CONFIG_CAN_CALC_BITTIMING */ -int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, +int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc, const u32 *bitrate_const, const unsigned int bitrate_const_cnt); -- cgit v1.2.3-71-gd317 From 05f2281b4192320a20d746df6146b3dd82f96e39 Mon Sep 17 00:00:00 2001 From: Ricardo Rivera-Matos Date: Mon, 14 Feb 2022 18:07:56 -0600 Subject: power: supply: Introduces bypass charging property Adds a POWER_SUPPLY_CHARGE_TYPE_BYPASS option to the POWER_SUPPLY_PROP_CHARGE_TYPE property to facilitate bypass charging operation. In bypass charging operation, the charger bypasses the charging path around the integrated converter allowing for a "smart" wall adaptor to perform the power conversion externally. 
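A charger driver would report the mode from its get_property() callback, e.g. (sketch; bypass_active() is a hypothetical helper):

    case POWER_SUPPLY_PROP_CHARGE_TYPE:
            if (bypass_active(chip))
                    val->intval = POWER_SUPPLY_CHARGE_TYPE_BYPASS;
            else
                    val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
            break;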
This operational mode is critical for the USB PPS standard of power adaptors and is becoming a common feature in modern charging ICs such as: - BQ25980 - BQ25975 - BQ25960 - LN8000 - LN8410 Signed-off-by: Ricardo Rivera-Matos Signed-off-by: Sebastian Reichel --- Documentation/ABI/testing/sysfs-class-power | 7 +++++-- drivers/power/supply/power_supply_sysfs.c | 1 + include/linux/power_supply.h | 1 + 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power index fde21d900420..738dcb0cf180 100644 --- a/Documentation/ABI/testing/sysfs-class-power +++ b/Documentation/ABI/testing/sysfs-class-power @@ -380,13 +380,16 @@ Description: algorithm to adjust the charge rate dynamically, without any user configuration required. "Custom" means that the charger uses the charge_control_* properties as configuration for some - different algorithm. + different algorithm. "Bypass" means the charger bypasses the + charging path around the integrated converter allowing for a + "smart" wall adaptor to perform the power conversion + externally. Access: Read, Write Valid values: "Unknown", "N/A", "Trickle", "Fast", "Standard", - "Adaptive", "Custom" + "Adaptive", "Custom", "Bypass" What: /sys/class/power_supply//charge_term_current Date: July 2014 diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index c0dfcfa33206..4239591e1522 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -89,6 +89,7 @@ static const char * const POWER_SUPPLY_CHARGE_TYPE_TEXT[] = { [POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE] = "Adaptive", [POWER_SUPPLY_CHARGE_TYPE_CUSTOM] = "Custom", [POWER_SUPPLY_CHARGE_TYPE_LONGLIFE] = "Long Life", + [POWER_SUPPLY_CHARGE_TYPE_BYPASS] = "Bypass", }; static const char * const POWER_SUPPLY_HEALTH_TEXT[] = { diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 006111917d1a..c135196aa9d1 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -49,6 +49,7 @@ enum { POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */ POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */ POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */ + POWER_SUPPLY_CHARGE_TYPE_BYPASS, /* bypassing the charger */ }; enum { -- cgit v1.2.3-71-gd317 From 4f0b903ded728c505850daf2914bfc08841f0ae6 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 23 Feb 2022 17:14:37 +0200 Subject: fsnotify: fix merge with parent's ignored mask fsnotify_parent() does not consider the parent's mark at all unless the parent inode shows interest in events on children and in the specific event. So unless parent added an event to both its mark mask and ignored mask, the event will not be ignored. Fix this by declaring the interest of an object in an event when the event is in either a mark mask or ignored mask. 
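In other words, the per-mark contribution to the object's interest mask becomes (simplified restatement of the fsnotify_calc_mask() helper added below):

    /* A mark with mask == 0 but ignored_mask == FAN_OPEN now makes the
     * object show interest in FAN_OPEN, so fsnotify_parent() reaches the
     * mark and the ignore actually takes effect.
     */
    new_mask |= mark->mask | (mark->ignored_mask & ALL_FSNOTIFY_EVENTS);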
Link: https://lore.kernel.org/r/20220223151438.790268-2-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify_user.c | 17 +++++++++-------- fs/notify/mark.c | 4 ++-- include/linux/fsnotify_backend.h | 15 +++++++++++++++ 3 files changed, 26 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 2ff6bd85ba8f..bd99430a128d 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -1003,17 +1003,18 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int flags, __u32 umask, int *destroy) { - __u32 oldmask = 0; + __u32 oldmask, newmask; /* umask bits cannot be removed by user */ mask &= ~umask; spin_lock(&fsn_mark->lock); + oldmask = fsnotify_calc_mask(fsn_mark); if (!(flags & FAN_MARK_IGNORED_MASK)) { - oldmask = fsn_mark->mask; fsn_mark->mask &= ~mask; } else { fsn_mark->ignored_mask &= ~mask; } + newmask = fsnotify_calc_mask(fsn_mark); /* * We need to keep the mark around even if remaining mask cannot * result in any events (e.g. mask == FAN_ONDIR) to support incremenal @@ -1023,7 +1024,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, *destroy = !((fsn_mark->mask | fsn_mark->ignored_mask) & ~umask); spin_unlock(&fsn_mark->lock); - return mask & oldmask; + return oldmask & ~newmask; } static int fanotify_remove_mark(struct fsnotify_group *group, @@ -1081,23 +1082,23 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group, } static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, - __u32 mask, - unsigned int flags) + __u32 mask, unsigned int flags) { - __u32 oldmask = -1; + __u32 oldmask, newmask; spin_lock(&fsn_mark->lock); + oldmask = fsnotify_calc_mask(fsn_mark); if (!(flags & FAN_MARK_IGNORED_MASK)) { - oldmask = fsn_mark->mask; fsn_mark->mask |= mask; } else { fsn_mark->ignored_mask |= mask; if (flags & FAN_MARK_IGNORED_SURV_MODIFY) fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; } + newmask = fsnotify_calc_mask(fsn_mark); spin_unlock(&fsn_mark->lock); - return mask & ~oldmask; + return newmask & ~oldmask; } static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 9007d6affff3..4853184f7dde 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -127,7 +127,7 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) return; hlist_for_each_entry(mark, &conn->list, obj_list) { if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) - new_mask |= mark->mask; + new_mask |= fsnotify_calc_mask(mark); } *fsnotify_conn_mask_p(conn) = new_mask; } @@ -692,7 +692,7 @@ int fsnotify_add_mark_locked(struct fsnotify_mark *mark, if (ret) goto err; - if (mark->mask) + if (mark->mask || mark->ignored_mask) fsnotify_recalc_mask(mark->connector); return ret; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 790c31844db5..5f9c960049b0 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -601,6 +601,21 @@ extern void fsnotify_remove_queued_event(struct fsnotify_group *group, /* functions used to manipulate the marks attached to inodes */ +/* Get mask for calculating object interest taking ignored mask into account */ +static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark) +{ + __u32 mask = mark->mask; + + if (!mark->ignored_mask) + return mask; + 
+ /* + * If mark is interested in ignoring events on children, the object must + * show interest in those events for fsnotify_parent() to notice it. + */ + return mask | (mark->ignored_mask & ALL_FSNOTIFY_EVENTS); +} + /* Get mask of events for a list of marks */ extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn); /* Calculate mask of events for a list of marks */ -- cgit v1.2.3-71-gd317 From 04e317ba72d07901b03399b3d1525e83424df5b3 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 23 Feb 2022 17:14:38 +0200 Subject: fsnotify: optimize FS_MODIFY events with no ignored masks fsnotify() treats FS_MODIFY events specially - it does not skip them even if the FS_MODIFY event does not apear in the object's fsnotify mask. This is because send_to_group() checks if FS_MODIFY needs to clear ignored mask of marks. The common case is that an object does not have any mark with ignored mask and in particular, that it does not have a mark with ignored mask and without the FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY flag. Set FS_MODIFY in object's fsnotify mask during fsnotify_recalc_mask() if object has a mark with an ignored mask and without the FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY flag and remove the special treatment of FS_MODIFY in fsnotify(), so that FS_MODIFY events could be optimized in the common case. Call fsnotify_recalc_mask() from fanotify after adding or removing an ignored mask from a mark without FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY or when adding the FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY flag to a mark with ignored mask (the flag cannot be removed by fanotify uapi). Performance results for doing 10000000 write(2)s to tmpfs: vanilla patched without notification mark 25.486+-1.054 24.965+-0.244 with notification mark 30.111+-0.139 26.891+-1.355 So we can see the overhead of notification subsystem has been drastically reduced. Link: https://lore.kernel.org/r/20220223151438.790268-3-amir73il@gmail.com Suggested-by: Jan Kara Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify_user.c | 32 +++++++++++++++++++++++++------- fs/notify/fsnotify.c | 8 +++++--- include/linux/fsnotify_backend.h | 4 ++++ 3 files changed, 34 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index bd99430a128d..9b32b76a9c30 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -1081,8 +1081,28 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group, flags, umask); } +static void fanotify_mark_add_ignored_mask(struct fsnotify_mark *fsn_mark, + __u32 mask, unsigned int flags, + __u32 *removed) +{ + fsn_mark->ignored_mask |= mask; + + /* + * Setting FAN_MARK_IGNORED_SURV_MODIFY for the first time may lead to + * the removal of the FS_MODIFY bit in calculated mask if it was set + * because of an ignored mask that is now going to survive FS_MODIFY. 
+ */ + if ((flags & FAN_MARK_IGNORED_SURV_MODIFY) && + !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) { + fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; + if (!(fsn_mark->mask & FS_MODIFY)) + *removed = FS_MODIFY; + } +} + static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, - __u32 mask, unsigned int flags) + __u32 mask, unsigned int flags, + __u32 *removed) { __u32 oldmask, newmask; @@ -1091,9 +1111,7 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, if (!(flags & FAN_MARK_IGNORED_MASK)) { fsn_mark->mask |= mask; } else { - fsn_mark->ignored_mask |= mask; - if (flags & FAN_MARK_IGNORED_SURV_MODIFY) - fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; + fanotify_mark_add_ignored_mask(fsn_mark, mask, flags, removed); } newmask = fsnotify_calc_mask(fsn_mark); spin_unlock(&fsn_mark->lock); @@ -1156,7 +1174,7 @@ static int fanotify_add_mark(struct fsnotify_group *group, __kernel_fsid_t *fsid) { struct fsnotify_mark *fsn_mark; - __u32 added; + __u32 added, removed = 0; int ret = 0; mutex_lock(&group->mark_mutex); @@ -1179,8 +1197,8 @@ static int fanotify_add_mark(struct fsnotify_group *group, goto out; } - added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); - if (added & ~fsnotify_conn_mask(fsn_mark->connector)) + added = fanotify_mark_add_to_mask(fsn_mark, mask, flags, &removed); + if (removed || (added & ~fsnotify_conn_mask(fsn_mark->connector))) fsnotify_recalc_mask(fsn_mark->connector); out: diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index ab81a0776ece..494f653efbc6 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -531,11 +531,13 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, /* - * if this is a modify event we may need to clear the ignored masks - * otherwise return if none of the marks care about this type of event. + * If this is a modify event we may need to clear some ignored masks. + * In that case, the object with ignored masks will have the FS_MODIFY + * event in its mask. + * Otherwise, return if none of the marks care about this type of event. */ test_mask = (mask & ALL_FSNOTIFY_EVENTS); - if (!(mask & FS_MODIFY) && !(test_mask & marks_mask)) + if (!(test_mask & marks_mask)) return 0; iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 5f9c960049b0..0805b74cae44 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -609,6 +609,10 @@ static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark) if (!mark->ignored_mask) return mask; + /* Interest in FS_MODIFY may be needed for clearing ignored mask */ + if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) + mask |= FS_MODIFY; + /* * If mark is interested in ignoring events on children, the object must * show interest in those events for fsnotify_parent() to notice it. -- cgit v1.2.3-71-gd317 From 0cc62aed370d91ec3d5cf258e53028c736e88a09 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 21 Jan 2022 08:42:21 +0000 Subject: sizes.h: Add SZ_1T macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Today drivers/pci/controller/pci-xgene.c defines SZ_1T Move it into linux/sizes.h so that it can be re-used elsewhere. 
Link: https://lore.kernel.org/r/575cb7164cf124c75df7cb9242ea7374733942bf.1642752946.git.christophe.leroy@csgroup.eu Signed-off-by: Christophe Leroy Signed-off-by: Lorenzo Pieralisi Reviewed-by: Krzysztof Wilczyński Acked-by: Bjorn Helgaas Cc: Toan Le Cc: linux-pci@vger.kernel.org --- drivers/pci/controller/pci-xgene.c | 1 - include/linux/sizes.h | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 0d5acbfc7143..77c1fe7e11f9 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -49,7 +49,6 @@ #define EN_REG 0x00000001 #define OB_LO_IO 0x00000002 #define XGENE_PCIE_DEVICEID 0xE004 -#define SZ_1T (SZ_1G*1024ULL) #define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) #define XGENE_V1_PCI_EXP_CAP 0x40 diff --git a/include/linux/sizes.h b/include/linux/sizes.h index 1ac79bcee2bb..84aa448d8bb3 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -47,6 +47,8 @@ #define SZ_8G _AC(0x200000000, ULL) #define SZ_16G _AC(0x400000000, ULL) #define SZ_32G _AC(0x800000000, ULL) + +#define SZ_1T _AC(0x10000000000, ULL) #define SZ_64T _AC(0x400000000000, ULL) #endif /* __LINUX_SIZES_H__ */ -- cgit v1.2.3-71-gd317 From 34737e26980341519d00e84711fe619f9f47e79c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 11 Feb 2022 08:50:00 +0100 Subject: uaccess: add generic __{get,put}_kernel_nofault Nine architectures are still missing __{get,put}_kernel_nofault: alpha, ia64, microblaze, nds32, nios2, openrisc, sh, sparc32, xtensa. Add a generic version that lets everything use the normal copy_{from,to}_kernel_nofault() code based on these, removing the last use of get_fs()/set_fs() from architecture-independent code. Reviewed-by: Christoph Hellwig Acked-by: Geert Uytterhoeven Signed-off-by: Arnd Bergmann --- arch/arm/include/asm/uaccess.h | 2 - arch/arm64/include/asm/uaccess.h | 2 - arch/m68k/include/asm/uaccess.h | 2 - arch/mips/include/asm/uaccess.h | 2 - arch/parisc/include/asm/uaccess.h | 1 - arch/powerpc/include/asm/uaccess.h | 2 - arch/riscv/include/asm/uaccess.h | 2 - arch/s390/include/asm/uaccess.h | 2 - arch/sparc/include/asm/uaccess_64.h | 2 - arch/um/include/asm/uaccess.h | 2 - arch/x86/include/asm/uaccess.h | 2 - include/asm-generic/uaccess.h | 2 - include/linux/uaccess.h | 19 +++++++ mm/maccess.c | 108 ------------------------------------ 14 files changed, 19 insertions(+), 131 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 32dbfd81f42a..d20d78c34b94 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -476,8 +476,6 @@ do { \ : "r" (x), "i" (-EFAULT) \ : "cc") -#define HAVE_GET_KERNEL_NOFAULT - #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ const type *__pk_ptr = (src); \ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 3a5ff5e20586..2e20879fe3cf 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -26,8 +26,6 @@ #include #include -#define HAVE_GET_KERNEL_NOFAULT - /* * Test whether a block of memory is a valid user space address. * Returns 1 if the range is valid, 0 otherwise. 
diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h index ba670523885c..79617c0b2f91 100644 --- a/arch/m68k/include/asm/uaccess.h +++ b/arch/m68k/include/asm/uaccess.h @@ -390,8 +390,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER -#define HAVE_GET_KERNEL_NOFAULT - #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ type *__gk_dst = (type *)(dst); \ diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index f8f74f9f5883..db9a8e002b62 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -296,8 +296,6 @@ struct __large_struct { unsigned long buf[100]; }; (val) = __gu_tmp.t; \ } -#define HAVE_GET_KERNEL_NOFAULT - #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ int __gu_err; \ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index ebf8a845b017..0925bbd6db67 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -95,7 +95,6 @@ struct exception_table_entry { (val) = (__force __typeof__(*(ptr))) __gu_val; \ } -#define HAVE_GET_KERNEL_NOFAULT #define __get_kernel_nofault(dst, src, type, err_label) \ { \ type __z; \ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 63316100080c..a0032c2e7550 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -467,8 +467,6 @@ do { \ unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \ } while (0) -#define HAVE_GET_KERNEL_NOFAULT - #define __get_kernel_nofault(dst, src, type, err_label) \ __get_user_size_goto(*((type *)(dst)), \ (__force type __user *)(src), sizeof(type), err_label) diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index c701a5e57a2b..4407b9e48d2c 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -346,8 +346,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n) __clear_user(to, n) : n; } -#define HAVE_GET_KERNEL_NOFAULT - #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ long __kr_err; \ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index d74e26b48604..29332edf46f0 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -282,8 +282,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo int copy_to_user_real(void __user *dest, void *src, unsigned long count); void *s390_kernel_write(void *dst, const void *src, size_t size); -#define HAVE_GET_KERNEL_NOFAULT - int __noreturn __put_kernel_bad(void); #define __put_kernel_asm(val, to, insn) \ diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index b283798315b1..5c12fb46bc61 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -210,8 +210,6 @@ __asm__ __volatile__( \ : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ "i" (-EFAULT)) -#define HAVE_GET_KERNEL_NOFAULT - #define __get_user_nocheck(data, addr, size, type) ({ \ register int __gu_ret; \ register unsigned long __gu_val; \ diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 17d18cfd82a5..1ecfc96bcc50 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -44,8 +44,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size) } /* no pagefaults for kernel 
addresses in um */ -#define HAVE_GET_KERNEL_NOFAULT 1 - #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ *((type *)dst) = get_unaligned((type *)(src)); \ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index a59ba2578e64..201efcec66b7 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -507,8 +507,6 @@ do { \ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \ } while (0) -#define HAVE_GET_KERNEL_NOFAULT - #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT #define __get_kernel_nofault(dst, src, type, err_label) \ __get_user_size(*((type *)(dst)), (__force type __user *)(src), \ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 10ffa8b5c117..0870fa11a7c5 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -77,8 +77,6 @@ do { \ goto err_label; \ } while (0) -#define HAVE_GET_KERNEL_NOFAULT 1 - static inline __must_check unsigned long raw_copy_from_user(void *to, const void __user * from, unsigned long n) { diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index ac0394087f7d..67e9bc94dc40 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -368,6 +368,25 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count); long strnlen_user_nofault(const void __user *unsafe_addr, long count); +#ifndef __get_kernel_nofault +#define __get_kernel_nofault(dst, src, type, label) \ +do { \ + type __user *p = (type __force __user *)(src); \ + type data; \ + if (__get_user(data, p)) \ + goto label; \ + *(type *)dst = data; \ +} while (0) + +#define __put_kernel_nofault(dst, src, type, label) \ +do { \ + type __user *p = (type __force __user *)(dst); \ + type data = *(type *)src; \ + if (__put_user(data, p)) \ + goto label; \ +} while (0) +#endif + /** * get_kernel_nofault(): safely attempt to read from a location * @val: read into this variable diff --git a/mm/maccess.c b/mm/maccess.c index d3f1a1f0b1c1..cbd1b3959af2 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -12,8 +12,6 @@ bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src, return true; } -#ifdef HAVE_GET_KERNEL_NOFAULT - #define copy_from_kernel_nofault_loop(dst, src, len, type, err_label) \ while (len >= sizeof(type)) { \ __get_kernel_nofault(dst, src, type, err_label); \ @@ -102,112 +100,6 @@ Efault: dst[-1] = '\0'; return -EFAULT; } -#else /* HAVE_GET_KERNEL_NOFAULT */ -/** - * copy_from_kernel_nofault(): safely attempt to read from kernel-space - * @dst: pointer to the buffer that shall take the data - * @src: address to read from - * @size: size of the data chunk - * - * Safely read from kernel address @src to the buffer at @dst. If a kernel - * fault happens, handle that and return -EFAULT. If @src is not a valid kernel - * address, return -ERANGE. - * - * We ensure that the copy_from_user is executed in atomic context so that - * do_page_fault() doesn't attempt to take mmap_lock. This makes - * copy_from_kernel_nofault() suitable for use within regions where the caller - * already holds mmap_lock, or other locks which nest inside mmap_lock. 
- */ -long copy_from_kernel_nofault(void *dst, const void *src, size_t size) -{ - long ret; - mm_segment_t old_fs = get_fs(); - - if (!copy_from_kernel_nofault_allowed(src, size)) - return -ERANGE; - - set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_from_user_inatomic(dst, (__force const void __user *)src, - size); - pagefault_enable(); - set_fs(old_fs); - - if (ret) - return -EFAULT; - return 0; -} -EXPORT_SYMBOL_GPL(copy_from_kernel_nofault); - -/** - * copy_to_kernel_nofault(): safely attempt to write to a location - * @dst: address to write to - * @src: pointer to the data that shall be written - * @size: size of the data chunk - * - * Safely write to address @dst from the buffer at @src. If a kernel fault - * happens, handle that and return -EFAULT. - */ -long copy_to_kernel_nofault(void *dst, const void *src, size_t size) -{ - long ret; - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); - pagefault_enable(); - set_fs(old_fs); - - if (ret) - return -EFAULT; - return 0; -} - -/** - * strncpy_from_kernel_nofault: - Copy a NUL terminated string from unsafe - * address. - * @dst: Destination address, in kernel space. This buffer must be at - * least @count bytes long. - * @unsafe_addr: Unsafe address. - * @count: Maximum number of bytes to copy, including the trailing NUL. - * - * Copies a NUL-terminated string from unsafe address to kernel buffer. - * - * On success, returns the length of the string INCLUDING the trailing NUL. - * - * If access fails, returns -EFAULT (some data may have been copied and the - * trailing NUL added). If @unsafe_addr is not a valid kernel address, return - * -ERANGE. - * - * If @count is smaller than the length of the string, copies @count-1 bytes, - * sets the last byte of @dst buffer to NUL and returns @count. - */ -long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) -{ - mm_segment_t old_fs = get_fs(); - const void *src = unsafe_addr; - long ret; - - if (unlikely(count <= 0)) - return 0; - if (!copy_from_kernel_nofault_allowed(unsafe_addr, count)) - return -ERANGE; - - set_fs(KERNEL_DS); - pagefault_disable(); - - do { - ret = __get_user(*dst++, (const char __user __force *)src++); - } while (dst[-1] && ret == 0 && src - unsafe_addr < count); - - dst[-1] = '\0'; - pagefault_enable(); - set_fs(old_fs); - - return ret ? -EFAULT : src - unsafe_addr; -} -#endif /* HAVE_GET_KERNEL_NOFAULT */ /** * copy_from_user_nofault(): safely attempt to read from a user-space location -- cgit v1.2.3-71-gd317 From 12700c17fc286149324f92d6d380bc48e43f253d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 15 Feb 2022 17:55:04 +0100 Subject: uaccess: generalize access_ok() There are many different ways that access_ok() is defined across architectures, but in the end, they all just compare against the user_addr_max() value or they accept anything. Provide one definition that works for most architectures, checking against TASK_SIZE_MAX for user processes or skipping the check inside of uaccess_kernel() sections. For architectures without CONFIG_SET_FS(), this should be the fastest check, as it comes down to a single comparison of a pointer against a compile-time constant, while the architecture specific versions tend to do something more complex for historic reasons or get something wrong. 
Type checking for __user annotations is handled inconsistently across architectures, but this is easily simplified as well by using an inline function that takes a 'const void __user *' argument. A handful of callers need an extra __user annotation for this. Some architectures had a trick of using 33-bit or 65-bit arithmetic on the addresses to calculate the overflow; however, this simpler version uses fewer registers, which means it can produce better object code in the end despite needing a second (statically predicted) branch. Reviewed-by: Christoph Hellwig Acked-by: Mark Rutland [arm64, asm-generic] Acked-by: Geert Uytterhoeven Acked-by: Stafford Horne Acked-by: Dinh Nguyen Signed-off-by: Arnd Bergmann --- arch/Kconfig | 7 ++++ arch/alpha/include/asm/uaccess.h | 34 +++----------------- arch/arc/include/asm/uaccess.h | 29 ----------------- arch/arm/include/asm/uaccess.h | 20 +----------- arch/arm64/include/asm/uaccess.h | 11 +++---- arch/csky/include/asm/uaccess.h | 8 ----- arch/hexagon/include/asm/uaccess.h | 25 --------------- arch/ia64/include/asm/uaccess.h | 5 ++- arch/m68k/Kconfig.cpu | 1 + arch/m68k/include/asm/uaccess.h | 15 +-------- arch/microblaze/include/asm/uaccess.h | 8 +---- arch/mips/include/asm/uaccess.h | 29 +---------------- arch/nds32/include/asm/uaccess.h | 7 ++-- arch/nios2/include/asm/uaccess.h | 11 +------ arch/openrisc/include/asm/uaccess.h | 19 +---------- arch/parisc/Kconfig | 1 + arch/parisc/include/asm/uaccess.h | 12 ++----- arch/powerpc/include/asm/uaccess.h | 11 +------ arch/riscv/include/asm/uaccess.h | 31 +----------------- arch/s390/Kconfig | 1 + arch/s390/include/asm/uaccess.h | 14 +------- arch/sh/include/asm/uaccess.h | 22 ++----------- arch/sparc/Kconfig | 1 + arch/sparc/include/asm/uaccess.h | 3 -- arch/sparc/include/asm/uaccess_32.h | 18 +++-------- arch/sparc/include/asm/uaccess_64.h | 12 +------ arch/um/include/asm/uaccess.h | 5 +-- arch/x86/include/asm/uaccess.h | 14 ++------ arch/xtensa/include/asm/uaccess.h | 10 +----- include/asm-generic/access_ok.h | 60 +++++++++++++++++++++++++++++++++++ include/asm-generic/uaccess.h | 21 +----------- include/linux/uaccess.h | 7 ---- 32 files changed, 110 insertions(+), 362 deletions(-) create mode 100644 include/asm-generic/access_ok.h (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 678a80713b21..fa5db36bda67 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -898,6 +898,13 @@ config HAVE_SOFTIRQ_ON_OWN_STACK Architecture provides a function to run __do_softirq() on a separate stack. +config ALTERNATE_USER_ADDRESS_SPACE + bool + help + Architectures set this when the CPU uses separate address + spaces for kernel and user space pointers. In this case, the + access_ok() check on a __user pointer is skipped. + config PGTABLE_LEVELS int default 2 diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 1b6f25efa247..82c5743fc9cd 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -20,28 +20,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -/* - * Is a address valid? This does a straightforward calculation rather - * than tests. - * - * Address valid if: - * - "addr" doesn't have any high-bits set - * - AND "size" doesn't have any high-bits set - * - AND "addr+size-(size != 0)" doesn't have any high-bits set - * - OR we are in kernel mode.
- */ -#define __access_ok(addr, size) ({ \ - unsigned long __ao_a = (addr), __ao_b = (size); \ - unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ - (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; }) - -#define access_ok(addr, size) \ -({ \ - __chk_user_ptr(addr); \ - __access_ok(((unsigned long)(addr)), (size)); \ -}) +#include /* * These are the main single-value transfer routines. They automatically @@ -105,7 +84,7 @@ extern void __get_user_unknown(void); long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - if (__access_ok((unsigned long)__gu_addr, size)) { \ + if (__access_ok(__gu_addr, size)) { \ __gu_err = 0; \ switch (size) { \ case 1: __get_user_8(__gu_addr); break; \ @@ -200,7 +179,7 @@ extern void __put_user_unknown(void); ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - if (__access_ok((unsigned long)__pu_addr, size)) { \ + if (__access_ok(__pu_addr, size)) { \ __pu_err = 0; \ switch (size) { \ case 1: __put_user_8(x, __pu_addr); break; \ @@ -316,17 +295,14 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long len) extern long __clear_user(void __user *to, long len); -extern inline long +static inline long clear_user(void __user *to, long len) { - if (__access_ok((unsigned long)to, len)) + if (__access_ok(to, len)) len = __clear_user(to, len); return len; } -#define user_addr_max() \ - (uaccess_kernel() ? ~0UL : TASK_SIZE) - extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 783bfdb3bfa3..30f80b4be2ab 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -23,35 +23,6 @@ #include /* for generic string functions */ - -#define __kernel_ok (uaccess_kernel()) - -/* - * Algorithmically, for __user_ok() we want do: - * (start < TASK_SIZE) && (start+len < TASK_SIZE) - * where TASK_SIZE could either be retrieved from thread_info->addr_limit or - * emitted directly in code. - * - * This can however be rewritten as follows: - * (len <= TASK_SIZE) && (start+len < TASK_SIZE) - * - * Because it essentially checks if buffer end is within limit and @len is - * non-ngeative, which implies that buffer start will be within limit too. - * - * The reason for rewriting being, for majority of cases, @len is generally - * compile time constant, causing first sub-expression to be compile time - * subsumed. - * - * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10), - * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem - * would already have been done at this call site for __kernel_ok() - * - */ -#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \ - ((addr) <= (get_fs() - (sz)))) -#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \ - likely(__user_ok((addr), (sz)))) - /*********** Single byte/hword/word copies ******************/ #define __get_user_fn(sz, u, k) \ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index d20d78c34b94..2fcbec9c306c 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -55,21 +55,6 @@ extern int __put_user_bad(void); #ifdef CONFIG_MMU -/* - * We use 33-bit arithmetic here. Success returns zero, failure returns - * addr_limit. We take advantage that addr_limit will be zero for KERNEL_DS, - * so this will always return success in that case. 
- */ -#define __range_ok(addr, size) ({ \ - unsigned long flag, roksum; \ - __chk_user_ptr(addr); \ - __asm__(".syntax unified\n" \ - "adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \ - : "=&r" (flag), "=&r" (roksum) \ - : "r" (addr), "Ir" (size), "0" (TASK_SIZE) \ - : "cc"); \ - flag; }) - /* * This is a type: either unsigned long, if the argument fits into * that type, or otherwise unsigned long long. @@ -241,15 +226,12 @@ extern int __put_user_8(void *, unsigned long long); #else /* CONFIG_MMU */ -#define __addr_ok(addr) ((void)(addr), 1) -#define __range_ok(addr, size) ((void)(addr), 0) - #define get_user(x, p) __get_user(x, p) #define __put_user_check __put_user_nocheck #endif /* CONFIG_MMU */ -#define access_ok(addr, size) (__range_ok(addr, size) == 0) +#include #ifdef CONFIG_CPU_SPECTRE /* diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 199c553b740a..e8dce0cc5eaa 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -26,13 +26,7 @@ #include #include -static inline int __access_ok(const void __user *ptr, unsigned long size) -{ - unsigned long limit = TASK_SIZE_MAX; - unsigned long addr = (unsigned long)ptr; - - return (size <= limit) && (addr <= (limit - size)); -} +static inline int __access_ok(const void __user *ptr, unsigned long size); /* * Test whether a block of memory is a valid user space address. @@ -54,6 +48,9 @@ static inline int access_ok(const void __user *addr, unsigned long size) return likely(__access_ok(addr, size)); } +#define access_ok access_ok + +#include /* * User access enabling/disabling. diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h index ac5a54f57d40..fec8f77ffc99 100644 --- a/arch/csky/include/asm/uaccess.h +++ b/arch/csky/include/asm/uaccess.h @@ -5,14 +5,6 @@ #define user_addr_max() (current_thread_info()->addr_limit.seg) -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - unsigned long limit = user_addr_max(); - - return (size <= limit) && (addr <= (limit - size)); -} -#define __access_ok __access_ok - /* * __put_user_fn */ diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h index 719ba3f3c45c..bff77efc0d9a 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -12,31 +12,6 @@ */ #include -/* - * access_ok: - Checks if a user space pointer is valid - * @addr: User space pointer to start of block to check - * @size: Size of block to check - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Checks if a pointer to a block of memory in user space is valid. - * - * Returns true (nonzero) if the memory block *may* be valid, false (zero) - * if it is definitely invalid. - * - */ -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) - -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - unsigned long limit = TASK_SIZE; - - return (size <= limit) && (addr <= (limit - size)); -} -#define __access_ok __access_ok - /* * When a kernel-mode page fault is taken, the faulting instruction * address is checked against a table of exception_table_entries. 
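An editor's aside, mirroring the generic __access_ok() this patch adds in include/asm-generic/access_ok.h further below: the changelog's claim that the check "comes down to a single comparison of a pointer against a compile-time constant" is easiest to see when the size is constant. Assuming, purely for illustration, a 64-bit kernel with TASK_SIZE_MAX == 0x0000800000000000:

/* editor's sketch only; the limit value is an example, not a kernel API */
static inline int access_ok_const8(const void __user *ptr)
{
	const unsigned long limit = 0x0000800000000000UL; /* example TASK_SIZE_MAX */
	unsigned long addr = (unsigned long)ptr;

	/*
	 * With size == 8 known at compile time, "8 <= limit" folds to
	 * true and the whole check becomes addr <= 0x00007ffffffffff8:
	 * one unsigned comparison and one (statically predicted) branch.
	 */
	return 8 <= limit && addr <= limit - 8;
}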
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index e19d2dcc0ced..e242a3cc1330 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -50,8 +50,6 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - /* * When accessing user memory, we need to make sure the entire area really is in * user-level space. In order to do this efficiently, we make sure that the page at @@ -65,7 +63,8 @@ static inline int __access_ok(const void __user *p, unsigned long size) return likely(addr <= seg) && (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT)); } -#define access_ok(addr, size) __access_ok((addr), (size)) +#define __access_ok __access_ok +#include /* * These are the main single-value transfer routines. They automatically diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 0d00ef5117dc..16ea9a67723c 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -453,6 +453,7 @@ config CPU_HAS_NO_UNALIGNED config CPU_HAS_ADDRESS_SPACES bool + select ALTERNATE_USER_ADDRESS_SPACE config FPU bool diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h index 9f0f1b6e14ed..64914872a5c9 100644 --- a/arch/m68k/include/asm/uaccess.h +++ b/arch/m68k/include/asm/uaccess.h @@ -10,20 +10,7 @@ #include #include #include - -/* We let the MMU do all checking */ -static inline int access_ok(const void __user *ptr, - unsigned long size) -{ - unsigned long limit = TASK_SIZE; - unsigned long addr = (unsigned long)ptr; - - if (IS_ENABLED(CONFIG_CPU_HAS_ADDRESS_SPACES) || - !IS_ENABLED(CONFIG_MMU)) - return 1; - - return (size <= limit) && (addr <= (limit - size)); -} +#include /* * Not all varients of the 68k family support the notion of address spaces. diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 3fe96979d2c6..bf9b7657a65a 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -39,13 +39,7 @@ # define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - unsigned long limit = user_addr_max(); - - return (size <= limit) && (addr <= (limit - size)); -} -#define access_ok(addr, size) __access_ok((unsigned long)addr, size) +#include # define __FIXUP_SECTION ".section .fixup,\"ax\"\n" # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 73e543bc2e0e..c0cede273c7c 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -44,34 +44,7 @@ extern u64 __ua_limit; #endif /* CONFIG_64BIT */ -/* - * access_ok: - Checks if a user space pointer is valid - * @addr: User space pointer to start of block to check - * @size: Size of block to check - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Checks if a pointer to a block of memory in user space is valid. - * - * Returns true (nonzero) if the memory block may be valid, false (zero) - * if it is definitely invalid. - * - * Note that, depending on architecture, this function probably just - * checks that the pointer is in the user space range - after calling - * this function, memory access functions may still return -EFAULT. 
- */ - -static inline int __access_ok(const void __user *p, unsigned long size) -{ - unsigned long addr = (unsigned long)p; - unsigned long limit = TASK_SIZE_MAX; - - return (size <= limit) && (addr <= (limit - size)); -} - -#define access_ok(addr, size) \ - likely(__access_ok((addr), (size))) +#include /* * put_user: - Write a simple value into user space. diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 37a40981deb3..832d642a4068 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -38,18 +38,15 @@ extern int fixup_exception(struct pt_regs *regs); #define get_fs() (current_thread_info()->addr_limit) #define user_addr_max get_fs +#define uaccess_kernel() (get_fs() == KERNEL_DS) static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; } -#define uaccess_kernel() (get_fs() == KERNEL_DS) +#include -#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) - -#define access_ok(addr, size) \ - __range_ok((unsigned long)addr, (unsigned long)size) /* * Single-value transfer routines. They automatically use the right * size if we just have the right pointer type. Note that the functions diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index a5cbe07cf0da..6664ddc0e8e5 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -30,19 +30,10 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(seg) (current_thread_info()->addr_limit = (seg)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -#define __access_ok(addr, len) \ - (((signed long)(((long)get_fs().seg) & \ - ((long)(addr) | (((long)(addr)) + (len)) | (len)))) == 0) - -#define access_ok(addr, len) \ - likely(__access_ok((unsigned long)(addr), (unsigned long)(len))) +#include # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) - /* * Zero Userspace */ diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 120f5005461b..8f049ec99b3e 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -45,21 +45,7 @@ #define uaccess_kernel() (get_fs() == KERNEL_DS) -/* Ensure that the range from addr to addr+size is all within the process' - * address space - */ -static inline int __range_ok(unsigned long addr, unsigned long size) -{ - const mm_segment_t fs = get_fs(); - - return size <= fs && addr <= (fs - size); -} - -#define access_ok(addr, size) \ -({ \ - __chk_user_ptr(addr); \ - __range_ok((unsigned long)(addr), (size)); \ -}) +#include /* * These are the main single-value transfer routines. They automatically @@ -268,9 +254,6 @@ clear_user(void __user *addr, unsigned long size) return size; } -#define user_addr_max() \ - (uaccess_kernel() ? 
~0UL : TASK_SIZE) - extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 43c1c880def6..15039fdd5413 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config PARISC def_bool y + select ALTERNATE_USER_ADDRESS_SPACE select ARCH_32BIT_OFF_T if !64BIT select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_FUNCTION_TRACER diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 0925bbd6db67..187f4bdff13e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -11,15 +11,9 @@ #include #include -/* - * Note that since kernel addresses are in a separate address space on - * parisc, we don't need to do anything for access_ok(). - * We just let the page fault handler do the right thing. This also means - * that put_user is the same as __put_user, etc. - */ - -#define access_ok(uaddr, size) \ - ( (uaddr) == (uaddr) ) +#define TASK_SIZE_MAX DEFAULT_TASK_SIZE +#include +#include #define put_user __put_user #define get_user __get_user diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index a0032c2e7550..2e83217f52de 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -11,18 +11,9 @@ #ifdef __powerpc64__ /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */ #define TASK_SIZE_MAX TASK_SIZE_USER64 -#else -#define TASK_SIZE_MAX TASK_SIZE #endif -static inline bool __access_ok(unsigned long addr, unsigned long size) -{ - return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr; -} - -#define access_ok(addr, size) \ - (__chk_user_ptr(addr), \ - __access_ok((unsigned long)(addr), (size))) +#include /* * These are the main single-value transfer routines. They automatically diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 4407b9e48d2c..855450bed9f5 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -21,42 +21,13 @@ #include #include #include +#include #define __enable_user_access() \ __asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory") #define __disable_user_access() \ __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory") -/** - * access_ok: - Checks if a user space pointer is valid - * @addr: User space pointer to start of block to check - * @size: Size of block to check - * - * Context: User context only. This function may sleep. - * - * Checks if a pointer to a block of memory in user space is valid. - * - * Returns true (nonzero) if the memory block may be valid, false (zero) - * if it is definitely invalid. - * - * Note that, depending on architecture, this function probably just - * checks that the pointer is in the user space range - after calling - * this function, memory access functions may still return -EFAULT. 
- */ -#define access_ok(addr, size) ({ \ - __chk_user_ptr(addr); \ - likely(__access_ok((unsigned long __force)(addr), (size))); \ -}) - -/* - * Ensure that the range [addr, addr+size) is within the process's - * address space - */ -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - return size <= TASK_SIZE && addr <= TASK_SIZE - size; -} - /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index be9f39fd06df..fb48a62aa985 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -55,6 +55,7 @@ config S390 # Note: keep this list sorted alphabetically # imply IMA_SECURE_AND_OR_TRUSTED_BOOT + select ALTERNATE_USER_ADDRESS_SPACE select ARCH_32BIT_USTAT_F_TINODE select ARCH_BINFMT_ELF_STATE select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 29332edf46f0..5cb258cd9d29 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -17,22 +17,10 @@ #include #include #include +#include void debug_user_asce(int exit); -static inline int __range_ok(unsigned long addr, unsigned long size) -{ - return 1; -} - -#define __access_ok(addr, size) \ -({ \ - __chk_user_ptr(addr); \ - __range_ok((unsigned long)(addr), (size)); \ -}) - -#define access_ok(addr, size) __access_ok(addr, size) - unsigned long __must_check raw_copy_from_user(void *to, const void __user *from, unsigned long n); diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 8867bb04b00e..ccd219d74851 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -5,28 +5,10 @@ #include #include -#define __addr_ok(addr) \ - ((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg) - -/* - * __access_ok: Check if address with size is OK or not. - * - * Uhhuh, this needs 33-bit arithmetic. We have a carry.. - * - * sum := addr + size; carry? --> flag = true; - * if (sum >= addr_limit) flag = true; - */ -#define __access_ok(addr, size) ({ \ - unsigned long __ao_a = (addr), __ao_b = (size); \ - unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ - __ao_end >= __ao_a && __addr_ok(__ao_end); }) - -#define access_ok(addr, size) \ - (__chk_user_ptr(addr), \ - __access_ok((unsigned long __force)(addr), (size))) - #define user_addr_max() (current_thread_info()->addr_limit.seg) +#include + /* * Uh, these should become the main single-value transfer routines ... * They automatically use the right size if we just have the right diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 1cab1b284f1a..9f6f9bce5292 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -62,6 +62,7 @@ config SPARC32 config SPARC64 def_bool 64BIT + select ALTERNATE_USER_ADDRESS_SPACE select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_KRETPROBES diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h index 390094200fc4..ee75f69e3fcd 100644 --- a/arch/sparc/include/asm/uaccess.h +++ b/arch/sparc/include/asm/uaccess.h @@ -10,9 +10,6 @@ #include #endif -#define user_addr_max() \ - (uaccess_kernel() ? 
~0UL : TASK_SIZE) - long strncpy_from_user(char *dest, const char __user *src, long count); #endif diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 4a12346bb69c..367747116260 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -25,17 +25,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -/* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test - * can be fairly lightweight. - * No one can read/write anything from userland in the kernel space by setting - * large size and address near to PAGE_OFFSET - a fault will break his intentions. - */ -#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) -#define __kernel_ok (uaccess_kernel()) -#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) -#define access_ok(addr, size) __access_ok((unsigned long)(addr), size) +#include /* Uh, these should become the main single-value transfer routines.. * They automatically use the right size if we just have the right @@ -47,13 +37,13 @@ * and hide all the ugliness from the user. */ #define put_user(x, ptr) ({ \ - unsigned long __pu_addr = (unsigned long)(ptr); \ + void __user *__pu_addr = (ptr); \ __chk_user_ptr(ptr); \ __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \ }) #define get_user(x, ptr) ({ \ - unsigned long __gu_addr = (unsigned long)(ptr); \ + const void __user *__gu_addr = (ptr); \ __chk_user_ptr(ptr); \ __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \ }) @@ -232,7 +222,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size) static inline unsigned long clear_user(void __user *addr, unsigned long n) { - if (n && __access_ok((unsigned long) addr, n)) + if (n && __access_ok(addr, n)) return __clear_user(addr, n); else return n; diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 5c12fb46bc61..59b9a545df23 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -31,7 +31,7 @@ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) +#include #define set_fs(val) \ do { \ @@ -61,16 +61,6 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ }) -static inline int __access_ok(const void __user * addr, unsigned long size) -{ - return 1; -} - -static inline int access_ok(const void __user * addr, unsigned long size) -{ - return 1; -} - void __retl_efault(void); /* Uh, these should become the main single-value transfer routines.. diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 1ecfc96bcc50..7d9d60e41e4e 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -25,7 +25,7 @@ extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); extern unsigned long __clear_user(void __user *mem, unsigned long len); -static inline int __access_ok(unsigned long addr, unsigned long size); +static inline int __access_ok(const void __user *ptr, unsigned long size); /* Teach asm-generic/uaccess.h that we have C functions for these. 
*/ #define __access_ok __access_ok @@ -36,8 +36,9 @@ static inline int __access_ok(unsigned long addr, unsigned long size); #include -static inline int __access_ok(unsigned long addr, unsigned long size) +static inline int __access_ok(const void __user *ptr, unsigned long size) { + unsigned long addr = (unsigned long)ptr; return __addr_range_nowrap(addr, size) && (__under_task_size(addr, size) || __access_ok_vsyscall(addr, size)); diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 201efcec66b7..f78e2b3501a1 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -12,18 +12,6 @@ #include #include -/* - * Test whether a block of memory is a valid user space address. - * Returns 0 if the range is valid, nonzero otherwise. - */ -static inline bool __access_ok(void __user *ptr, unsigned long size) -{ - unsigned long limit = TASK_SIZE_MAX; - unsigned long addr = ptr; - - return (size <= limit) && (addr <= (limit - size)); -} - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline bool pagefault_disabled(void); # define WARN_ON_IN_IRQ() \ @@ -55,6 +43,8 @@ static inline bool pagefault_disabled(void); likely(__access_ok(addr, size)); \ }) +#include + extern int __get_user_1(void); extern int __get_user_2(void); extern int __get_user_4(void); diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 75bd8fbf52ba..0edd9e4b23d0 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -35,15 +35,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -#define __kernel_ok (uaccess_kernel()) -#define __user_ok(addr, size) \ - (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) -#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) -#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size)) - -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) +#include /* * These are the main single-value transfer routines. They diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h new file mode 100644 index 000000000000..d38cc5dad65b --- /dev/null +++ b/include/asm-generic/access_ok.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_ACCESS_OK_H__ +#define __ASM_GENERIC_ACCESS_OK_H__ + +/* + * Checking whether a pointer is valid for user space access. + * These definitions work on most architectures, but overrides can + * be used where necessary. + */ + +/* + * architectures with compat tasks have a variable TASK_SIZE and should + * override this to a constant. + */ +#ifndef TASK_SIZE_MAX +#define TASK_SIZE_MAX TASK_SIZE +#endif + +#ifndef uaccess_kernel +#ifdef CONFIG_SET_FS +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) +#else +#define uaccess_kernel() (0) +#endif +#endif + +#ifndef user_addr_max +#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE_MAX) +#endif + +#ifndef __access_ok +/* + * 'size' is a compile-time constant for most callers, so optimize for + * this case to turn the check into a single comparison against a constant + * limit and catch all possible overflows. + * On architectures with separate user address space (m68k, s390, parisc, + * sparc64) or those without an MMU, this should always return true. 
+ * + * This version was originally contributed by Jonas Bonn for the + * OpenRISC architecture, and was found to be the most efficient + * for constant 'size' and 'limit' values. + */ +static inline int __access_ok(const void __user *ptr, unsigned long size) +{ + unsigned long limit = user_addr_max(); + unsigned long addr = (unsigned long)ptr; + + if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) || + !IS_ENABLED(CONFIG_MMU)) + return true; + + return (size <= limit) && (addr <= (limit - size)); +} +#endif + +#ifndef access_ok +#define access_ok(addr, size) likely(__access_ok(addr, size)) +#endif + +#endif diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 0870fa11a7c5..ebc685dc8d74 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -114,28 +114,9 @@ static inline void set_fs(mm_segment_t fs) } #endif -#ifndef uaccess_kernel -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -#endif - -#ifndef user_addr_max -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) -#endif - #endif /* CONFIG_SET_FS */ -#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) - -/* - * The architecture should really override this if possible, at least - * doing a check on the get_fs() - */ -#ifndef __access_ok -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - return 1; -} -#endif +#include /* * These are the main single-value transfer routines. They automatically diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 67e9bc94dc40..2c31667e62e0 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -33,13 +33,6 @@ typedef struct { /* empty dummy */ } mm_segment_t; -#ifndef TASK_SIZE_MAX -#define TASK_SIZE_MAX TASK_SIZE -#endif - -#define uaccess_kernel() (false) -#define user_addr_max() (TASK_SIZE_MAX) - static inline mm_segment_t force_uaccess_begin(void) { return (mm_segment_t) { }; -- cgit v1.2.3-71-gd317 From 967747bbc084b93b54e66f9047d342232314cd25 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 11 Feb 2022 21:42:45 +0100 Subject: uaccess: remove CONFIG_SET_FS There are no remaining callers of set_fs(), so CONFIG_SET_FS can be removed globally, along with the thread_info field and any references to it. This turns access_ok() into a cheaper check against TASK_SIZE_MAX. As CONFIG_SET_FS is now gone, drop all remaining references to set_fs()/get_fs(), mm_segment_t, user_addr_max() and uaccess_kernel(). Acked-by: Sam Ravnborg # for sparc32 changes Acked-by: "Eric W. 
Biederman" Tested-by: Sergey Matyukevich # for arc changes Acked-by: Stafford Horne # [openrisc, asm-generic] Acked-by: Dinh Nguyen Signed-off-by: Arnd Bergmann --- arch/Kconfig | 3 -- arch/alpha/Kconfig | 1 - arch/alpha/include/asm/processor.h | 4 -- arch/alpha/include/asm/thread_info.h | 2 - arch/alpha/include/asm/uaccess.h | 19 --------- arch/arc/Kconfig | 1 - arch/arc/include/asm/segment.h | 20 --------- arch/arc/include/asm/thread_info.h | 3 -- arch/arc/include/asm/uaccess.h | 1 - arch/arm/lib/uaccess_with_memcpy.c | 10 ----- arch/arm64/kernel/traps.c | 2 +- arch/csky/Kconfig | 1 - arch/csky/include/asm/processor.h | 2 - arch/csky/include/asm/segment.h | 10 ----- arch/csky/include/asm/thread_info.h | 2 - arch/csky/include/asm/uaccess.h | 3 -- arch/csky/kernel/asm-offsets.c | 1 - arch/h8300/Kconfig | 1 - arch/h8300/include/asm/processor.h | 1 - arch/h8300/include/asm/segment.h | 40 ----------------- arch/h8300/include/asm/thread_info.h | 3 -- arch/h8300/kernel/entry.S | 1 - arch/h8300/kernel/head_ram.S | 1 - arch/h8300/mm/init.c | 6 --- arch/h8300/mm/memory.c | 1 - arch/hexagon/Kconfig | 1 - arch/hexagon/include/asm/thread_info.h | 6 --- arch/hexagon/kernel/process.c | 1 - arch/microblaze/Kconfig | 1 - arch/microblaze/include/asm/thread_info.h | 6 --- arch/microblaze/include/asm/uaccess.h | 24 ----------- arch/microblaze/kernel/asm-offsets.c | 1 - arch/microblaze/kernel/process.c | 1 - arch/nds32/Kconfig | 1 - arch/nds32/include/asm/thread_info.h | 4 -- arch/nds32/include/asm/uaccess.h | 15 +------ arch/nds32/kernel/process.c | 5 +-- arch/nds32/mm/alignment.c | 3 -- arch/nios2/Kconfig | 1 - arch/nios2/include/asm/thread_info.h | 9 ---- arch/nios2/include/asm/uaccess.h | 12 ------ arch/openrisc/Kconfig | 1 - arch/openrisc/include/asm/thread_info.h | 7 --- arch/openrisc/include/asm/uaccess.h | 23 ---------- arch/parisc/include/asm/futex.h | 6 --- arch/parisc/kernel/signal.c | 4 +- arch/parisc/lib/memcpy.c | 2 +- arch/sparc/Kconfig | 1 - arch/sparc/include/asm/processor_32.h | 6 --- arch/sparc/include/asm/uaccess_32.h | 13 ------ arch/sparc/kernel/process_32.c | 2 - arch/xtensa/Kconfig | 1 - arch/xtensa/include/asm/asm-uaccess.h | 71 ------------------------------- arch/xtensa/include/asm/processor.h | 7 --- arch/xtensa/include/asm/thread_info.h | 3 -- arch/xtensa/include/asm/uaccess.h | 16 ------- arch/xtensa/kernel/asm-offsets.c | 3 -- drivers/hid/uhid.c | 2 +- drivers/scsi/sg.c | 5 --- fs/exec.c | 6 --- include/asm-generic/access_ok.h | 14 +----- include/asm-generic/uaccess.h | 25 +---------- include/linux/syscalls.h | 4 -- include/linux/uaccess.h | 33 -------------- include/rdma/ib.h | 2 +- kernel/events/callchain.c | 4 -- kernel/events/core.c | 3 -- kernel/exit.c | 14 ------ kernel/kthread.c | 5 --- kernel/stacktrace.c | 3 -- kernel/trace/bpf_trace.c | 4 -- lib/strncpy_from_user.c | 2 +- lib/strnlen_user.c | 2 +- mm/maccess.c | 11 ----- mm/memory.c | 8 ---- net/bpfilter/bpfilter_kern.c | 2 +- 76 files changed, 14 insertions(+), 531 deletions(-) delete mode 100644 arch/arc/include/asm/segment.h delete mode 100644 arch/csky/include/asm/segment.h delete mode 100644 arch/h8300/include/asm/segment.h (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index fa5db36bda67..99349547afed 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -24,9 +24,6 @@ config KEXEC_ELF config HAVE_IMA_KEXEC bool -config SET_FS - bool - config HOTPLUG_SMT bool diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 4e87783c90ad..eee8b5b0a58b 100644 --- a/arch/alpha/Kconfig +++ 
b/arch/alpha/Kconfig @@ -35,7 +35,6 @@ config ALPHA select OLD_SIGSUSPEND select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67 select MMU_GATHER_NO_RANGE - select SET_FS select SPARSEMEM_EXTREME if SPARSEMEM select ZONE_DMA help diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h index 090499c99c1c..43e234c518b1 100644 --- a/arch/alpha/include/asm/processor.h +++ b/arch/alpha/include/asm/processor.h @@ -26,10 +26,6 @@ #define TASK_UNMAPPED_BASE \ ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2) -typedef struct { - unsigned long seg; -} mm_segment_t; - /* This is dead. Everything has been moved to thread_info. */ struct thread_struct { }; #define INIT_THREAD { } diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 2592356e3215..fdc485d7787a 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -19,7 +19,6 @@ struct thread_info { unsigned int flags; /* low level flags */ unsigned int ieee_state; /* see fpu.h */ - mm_segment_t addr_limit; /* thread address space */ unsigned cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ unsigned int status; /* thread-synchronous flags */ @@ -35,7 +34,6 @@ struct thread_info { #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ - .addr_limit = KERNEL_DS, \ .preempt_count = INIT_PREEMPT_COUNT, \ } diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 82c5743fc9cd..c32c2584c0b7 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -2,26 +2,7 @@ #ifndef __ALPHA_UACCESS_H #define __ALPHA_UACCESS_H -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * Or at least it did once upon a time. Nowadays it is a mask that - * defines which bits of the address space are off limits. This is a - * wee bit faster than the above. - * - * For historical reasons, these macros are grossly misnamed. - */ - -#define KERNEL_DS ((mm_segment_t) { 0UL }) -#define USER_DS ((mm_segment_t) { -0x40000000000UL }) - -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - #include - /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 3c2a4753d09b..e0a60a27e14d 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -45,7 +45,6 @@ config ARC select PCI_SYSCALL if PCI select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32 - select SET_FS select TRACE_IRQFLAGS_SUPPORT config LOCKDEP_SUPPORT diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h deleted file mode 100644 index 871f8ab11bfd..000000000000 --- a/arch/arc/include/asm/segment.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. 
(www.synopsys.com) - */ - -#ifndef __ASMARC_SEGMENT_H -#define __ASMARC_SEGMENT_H - -#ifndef __ASSEMBLY__ - -typedef unsigned long mm_segment_t; - -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) - -#define KERNEL_DS MAKE_MM_SEG(0) -#define USER_DS MAKE_MM_SEG(TASK_SIZE) -#define uaccess_kernel() (get_fs() == KERNEL_DS) - -#endif /* __ASSEMBLY__ */ -#endif /* __ASMARC_SEGMENT_H */ diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index d36863e34bfc..1e0b2e3914d5 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h @@ -27,7 +27,6 @@ #ifndef __ASSEMBLY__ #include -#include /* * low level task data that entry.S needs immediate access to @@ -40,7 +39,6 @@ struct thread_info { unsigned long flags; /* low level flags */ int preempt_count; /* 0 => preemptable, <0 => BUG */ struct task_struct *task; /* main task structure */ - mm_segment_t addr_limit; /* thread address space */ __u32 cpu; /* current CPU */ unsigned long thr_ptr; /* TLS ptr */ }; @@ -56,7 +54,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } static inline __attribute_const__ struct thread_info *current_thread_info(void) diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 30f80b4be2ab..99712471c96a 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -638,7 +638,6 @@ extern unsigned long arc_clear_user_noinline(void __user *to, #define __clear_user(d, n) arc_clear_user_noinline(d, n) #endif -#include #include #endif diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 106f83a5ea6d..c30b689bec2e 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -92,11 +92,6 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) unsigned long ua_flags; int atomic; - if (uaccess_kernel()) { - memcpy((void *)to, from, n); - return 0; - } - /* the mmap semaphore is taken only if not in an atomic context */ atomic = faulthandler_disabled(); @@ -165,11 +160,6 @@ __clear_user_memset(void __user *addr, unsigned long n) { unsigned long ua_flags; - if (uaccess_kernel()) { - memset((void *)addr, 0, n); - return 0; - } - mmap_read_lock(current->mm); while (n) { pte_t *pte; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 70fc42470f13..48dcdbdf9a36 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -519,7 +519,7 @@ void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr) NOKPROBE_SYMBOL(do_ptrauth_fault); #define __user_cache_maint(insn, address, res) \ - if (address >= user_addr_max()) { \ + if (address >= TASK_SIZE_MAX) { \ res = -EFAULT; \ } else { \ uaccess_ttbr0_enable(); \ diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 132f43f12dd8..75ef86605d69 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -79,7 +79,6 @@ config CSKY select PCI_DOMAINS_GENERIC if PCI select PCI_SYSCALL if PCI select PCI_MSI if PCI - select SET_FS select TRACE_IRQFLAGS_SUPPORT config LOCKDEP_SUPPORT diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 817dd60ff152..688c7548b559 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -4,7 +4,6 @@ #define __ASM_CSKY_PROCESSOR_H #include -#include #include #include #include @@ -59,7 +58,6 @@ struct thread_struct { */ #define start_thread(_regs, _pc, _usp) \ do { \ - set_fs(USER_DS); /* reads from user 
space */ \ (_regs)->pc = (_pc); \ (_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */ \ (_regs)->regs[2] = 0; \ diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h deleted file mode 100644 index 5bc1cc62b87f..000000000000 --- a/arch/csky/include/asm/segment.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef __ASM_CSKY_SEGMENT_H -#define __ASM_CSKY_SEGMENT_H - -typedef struct { - unsigned long seg; -} mm_segment_t; - -#endif /* __ASM_CSKY_SEGMENT_H */ diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h index 8c349a8f904d..b5ed788f0c68 100644 --- a/arch/csky/include/asm/thread_info.h +++ b/arch/csky/include/asm/thread_info.h @@ -16,7 +16,6 @@ struct thread_info { unsigned long flags; int preempt_count; unsigned long tp_value; - mm_segment_t addr_limit; struct restart_block restart_block; struct pt_regs *regs; unsigned int cpu; @@ -26,7 +25,6 @@ struct thread_info { { \ .task = &tsk, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ .cpu = 0, \ .restart_block = { \ .fn = do_no_restart_syscall, \ diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h index fec8f77ffc99..2e927c21d8a1 100644 --- a/arch/csky/include/asm/uaccess.h +++ b/arch/csky/include/asm/uaccess.h @@ -3,8 +3,6 @@ #ifndef __ASM_CSKY_UACCESS_H #define __ASM_CSKY_UACCESS_H -#define user_addr_max() (current_thread_info()->addr_limit.seg) - /* * __put_user_fn */ @@ -200,7 +198,6 @@ unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n); unsigned long __clear_user(void __user *to, unsigned long n); #define __clear_user __clear_user -#include #include #endif /* __ASM_CSKY_UACCESS_H */ diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c index 1cbcba4b0dd1..d1e903579473 100644 --- a/arch/csky/kernel/asm-offsets.c +++ b/arch/csky/kernel/asm-offsets.c @@ -25,7 +25,6 @@ int main(void) /* offsets into the thread_info struct */ DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count)); - DEFINE(TINFO_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value)); DEFINE(TINFO_TASK, offsetof(struct thread_info, task)); diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 3e3e0f16f7e0..fe48c4f26cc8 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -24,7 +24,6 @@ config H8300 select HAVE_ARCH_KGDB select HAVE_ARCH_HASH select CPU_NO_EFFICIENT_FFS - select SET_FS select UACCESS_MEMCPY config CPU_BIG_ENDIAN diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h index 141a23eb62b7..ba171aa4dacb 100644 --- a/arch/h8300/include/asm/processor.h +++ b/arch/h8300/include/asm/processor.h @@ -13,7 +13,6 @@ #define __ASM_H8300_PROCESSOR_H #include -#include #include #include diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h deleted file mode 100644 index 37950725d9b9..000000000000 --- a/arch/h8300/include/asm/segment.h +++ /dev/null @@ -1,40 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _H8300_SEGMENT_H -#define _H8300_SEGMENT_H - -/* define constants */ -#define USER_DATA (1) -#ifndef __USER_DS -#define __USER_DS (USER_DATA) -#endif -#define USER_PROGRAM (2) -#define SUPER_DATA (3) -#ifndef __KERNEL_DS -#define __KERNEL_DS (SUPER_DATA) -#endif -#define SUPER_PROGRAM (4) - -#ifndef __ASSEMBLY__ - -typedef struct { - unsigned long seg; -} mm_segment_t; - 
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) -#define USER_DS MAKE_MM_SEG(__USER_DS) -#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS) - -/* - * Get/set the SFC/DFC registers for MOVES instructions - */ - -static inline mm_segment_t get_fs(void) -{ - return USER_DS; -} - -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -#endif /* __ASSEMBLY__ */ - -#endif /* _H8300_SEGMENT_H */ diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h index a518214d4ddd..ff2d873749a4 100644 --- a/arch/h8300/include/asm/thread_info.h +++ b/arch/h8300/include/asm/thread_info.h @@ -10,7 +10,6 @@ #define _ASM_THREAD_INFO_H #include -#include #ifdef __KERNEL__ @@ -31,7 +30,6 @@ struct thread_info { unsigned long flags; /* low level flags */ int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - mm_segment_t addr_limit; }; /* @@ -43,7 +41,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } /* how to get the thread information struct from C */ diff --git a/arch/h8300/kernel/entry.S b/arch/h8300/kernel/entry.S index c6e289b5f1f2..42db87c17917 100644 --- a/arch/h8300/kernel/entry.S +++ b/arch/h8300/kernel/entry.S @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/h8300/kernel/head_ram.S b/arch/h8300/kernel/head_ram.S index dbf8429f5fab..489462f0ee57 100644 --- a/arch/h8300/kernel/head_ram.S +++ b/arch/h8300/kernel/head_ram.S @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index f7bf4693e3b2..9fa13312720a 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -34,7 +34,6 @@ #include #include -#include #include #include @@ -71,11 +70,6 @@ void __init paging_init(void) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, PAGE_SIZE, PAGE_SIZE); - /* - * Set up SFC/DFC registers (user data space). - */ - set_fs(USER_DS); - pr_debug("before free_area_init\n"); pr_debug("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n", diff --git a/arch/h8300/mm/memory.c b/arch/h8300/mm/memory.c index 4a60e2b5eb96..c950571064d2 100644 --- a/arch/h8300/mm/memory.c +++ b/arch/h8300/mm/memory.c @@ -24,7 +24,6 @@ #include #include -#include #include #include #include diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 15dd8f38b698..54eadf265178 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -30,7 +30,6 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES - select SET_FS select ARCH_WANT_LD_ORPHAN_WARN select TRACE_IRQFLAGS_SUPPORT help diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h index 535976665bf0..e90f280b9ce3 100644 --- a/arch/hexagon/include/asm/thread_info.h +++ b/arch/hexagon/include/asm/thread_info.h @@ -22,10 +22,6 @@ #ifndef __ASSEMBLY__ -typedef struct { - unsigned long seg; -} mm_segment_t; - /* * This is union'd with the "bottom" of the kernel stack. 
* It keeps track of thread info which is handy for routines @@ -37,7 +33,6 @@ struct thread_info { unsigned long flags; /* low level flags */ __u32 cpu; /* current cpu */ int preempt_count; /* 0=>preemptible,<0=>BUG */ - mm_segment_t addr_limit; /* segmentation sux */ /* * used for syscalls somehow; * seems to have a function pointer and four arguments @@ -66,7 +61,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = 1, \ - .addr_limit = KERNEL_DS, \ .sp = 0, \ .regs = NULL, \ } diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index 232dfd8956aa..dfa6b2757c05 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -105,7 +105,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, /* * Parent sees new pid -- not necessary, not even possible at * this point in the fork process - * Might also want to set things like ti->addr_limit */ return 0; diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 59798e43cdb0..1fb1cec087b7 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -42,7 +42,6 @@ config MICROBLAZE select CPU_NO_EFFICIENT_FFS select MMU_GATHER_NO_RANGE select SPARSE_IRQ - select SET_FS select ZONE_DMA select TRACE_IRQFLAGS_SUPPORT diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index 44f5ca331862..a0ddd2a36fb9 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h @@ -56,17 +56,12 @@ struct cpu_context { __u32 fsr; }; -typedef struct { - unsigned long seg; -} mm_segment_t; - struct thread_info { struct task_struct *task; /* main task structure */ unsigned long flags; /* low level flags */ unsigned long status; /* thread-synchronous flags */ __u32 cpu; /* current CPU */ __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/ - mm_segment_t addr_limit; /* thread address space */ struct cpu_context cpu_context; }; @@ -80,7 +75,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } /* how to get the thread information struct from C */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index bf9b7657a65a..3aab2f17e046 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -15,30 +15,6 @@ #include #include #include - -/* - * On Microblaze the fs value is actually the top of the corresponding - * address space. - * - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons, these macros are grossly misnamed. - * - * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal. 
- */ -# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) - -# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) -# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) - -# define get_fs() (current_thread_info()->addr_limit) -# define set_fs(val) (current_thread_info()->addr_limit = (val)) -# define user_addr_max() get_fs().seg - -# define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - #include # define __FIXUP_SECTION ".section .fixup,\"ax\"\n" diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c index b77dd188dec4..47ee409508b1 100644 --- a/arch/microblaze/kernel/asm-offsets.c +++ b/arch/microblaze/kernel/asm-offsets.c @@ -86,7 +86,6 @@ int main(int argc, char *argv[]) /* struct thread_info */ DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count)); BLANK(); diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 5e2b91c1e8ce..1b944d319d73 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -18,7 +18,6 @@ #include #include #include -#include /* for USER_DS macros */ #include void show_regs(struct pt_regs *regs) diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 4d1421b18734..013249430fa3 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -44,7 +44,6 @@ config NDS32 select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE - select SET_FS select TRACE_IRQFLAGS_SUPPORT help Andes(nds32) Linux support. diff --git a/arch/nds32/include/asm/thread_info.h b/arch/nds32/include/asm/thread_info.h index d3967ad184f0..bd8f81cf2ce5 100644 --- a/arch/nds32/include/asm/thread_info.h +++ b/arch/nds32/include/asm/thread_info.h @@ -16,8 +16,6 @@ struct task_struct; #include #include -typedef unsigned long mm_segment_t; - /* * low level task data that entry.S needs immediate access to. * __switch_to() assumes cpu_context follows immediately after cpu_domain. @@ -25,12 +23,10 @@ typedef unsigned long mm_segment_t; struct thread_info { unsigned long flags; /* low level flags */ __s32 preempt_count; /* 0 => preemptable, <0 => bug */ - mm_segment_t addr_limit; /* address limit */ }; #define INIT_THREAD_INFO(tsk) \ { \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } #define thread_saved_pc(tsk) ((unsigned long)(tsk->thread.cpu_context.pc)) #define thread_saved_fp(tsk) ((unsigned long)(tsk->thread.cpu_context.fp)) diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 832d642a4068..377548d4451a 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -11,6 +11,7 @@ #include #include #include +#include #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" @@ -33,20 +34,6 @@ struct exception_table_entry { extern int fixup_exception(struct pt_regs *regs); -#define KERNEL_DS ((mm_segment_t) { ~0UL }) -#define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) - -#define get_fs() (current_thread_info()->addr_limit) -#define user_addr_max get_fs -#define uaccess_kernel() (get_fs() == KERNEL_DS) - -static inline void set_fs(mm_segment_t fs) -{ - current_thread_info()->addr_limit = fs; -} - -#include - /* * Single-value transfer routines. They automatically use the right * size if we just have the right pointer type. 
Note that the functions diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c index 49fab9e39cbf..d35c1f63fa11 100644 --- a/arch/nds32/kernel/process.c +++ b/arch/nds32/kernel/process.c @@ -119,9 +119,8 @@ void show_regs(struct pt_regs *regs) regs->uregs[7], regs->uregs[6], regs->uregs[5], regs->uregs[4]); pr_info("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]); - pr_info(" IRQs o%s Segment %s\n", - interrupts_enabled(regs) ? "n" : "ff", - uaccess_kernel() ? "kernel" : "user"); + pr_info(" IRQs o%s Segment user\n", + interrupts_enabled(regs) ? "n" : "ff"); } EXPORT_SYMBOL(show_regs); diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c index 1eb7ded6992b..9c2c0a454da8 100644 --- a/arch/nds32/mm/alignment.c +++ b/arch/nds32/mm/alignment.c @@ -512,7 +512,6 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs) { unsigned long inst; int ret = -EFAULT; - mm_segment_t seg; inst = get_inst(regs->ipc); @@ -520,12 +519,10 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs) "Faulting addr: 0x%08lx, pc: 0x%08lx [inst: 0x%08lx ]\n", addr, regs->ipc, inst); - seg = force_uaccess_begin(); if (inst & NDS32_16BIT_INSTRUCTION) ret = do_16((inst >> 16) & 0xffff, regs); else ret = do_32(inst, regs); - force_uaccess_end(seg); return ret; } diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 33fd06f5fa41..4167f1eb4cd8 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -24,7 +24,6 @@ config NIOS2 select USB_ARCH_HAS_HCD if USB_SUPPORT select CPU_NO_EFFICIENT_FFS select MMU_GATHER_NO_RANGE if MMU - select SET_FS config GENERIC_CSUM def_bool y diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h index 272d2c72a727..bcc0e9915ebd 100644 --- a/arch/nios2/include/asm/thread_info.h +++ b/arch/nios2/include/asm/thread_info.h @@ -26,10 +26,6 @@ #ifndef __ASSEMBLY__ -typedef struct { - unsigned long seg; -} mm_segment_t; - /* * low level task data that entry.S needs immediate access to * - this struct should fit entirely inside of one cache line @@ -42,10 +38,6 @@ struct thread_info { unsigned long flags; /* low level flags */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable,<0 => BUG */ - mm_segment_t addr_limit; /* thread address space: - 0-0x7FFFFFFF for user-thead - 0-0xFFFFFFFF for kernel-thread - */ struct pt_regs *regs; }; @@ -60,7 +52,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } /* how to get the thread information struct from C */ diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index 6664ddc0e8e5..b8299082adbe 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -18,18 +18,6 @@ #include #include - -/* - * Segment stuff - */ -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) -#define USER_DS MAKE_MM_SEG(0x80000000UL) -#define KERNEL_DS MAKE_MM_SEG(0) - - -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(seg) (current_thread_info()->addr_limit = (seg)) - #include # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index f724b3f1aeed..0d68adf6e02b 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -36,7 +36,6 @@ config OPENRISC select ARCH_WANT_FRAME_POINTERS select GENERIC_IRQ_MULTI_HANDLER select MMU_GATHER_NO_RANGE if MMU - select SET_FS select TRACE_IRQFLAGS_SUPPORT config 
CPU_BIG_ENDIAN diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h index 659834ab87fa..4af3049c34c2 100644 --- a/arch/openrisc/include/asm/thread_info.h +++ b/arch/openrisc/include/asm/thread_info.h @@ -40,18 +40,12 @@ */ #ifndef __ASSEMBLY__ -typedef unsigned long mm_segment_t; - struct thread_info { struct task_struct *task; /* main task structure */ unsigned long flags; /* low level flags */ __u32 cpu; /* current CPU */ __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ - mm_segment_t addr_limit; /* thread address space: - 0-0x7FFFFFFF for user-thead - 0-0xFFFFFFFF for kernel-thread - */ __u8 supervisor_stack[0]; /* saved context data */ @@ -71,7 +65,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ .ksp = 0, \ } diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 8f049ec99b3e..d6500a374e18 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -22,29 +22,6 @@ #include #include #include - -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons, these macros are grossly misnamed. - */ - -/* addr_limit is the maximum accessible address for the task. we misuse - * the KERNEL_DS and USER_DS values to both assign and compare the - * addr_limit values through the equally misnamed get/set_fs macros. - * (see above) - */ - -#define KERNEL_DS (~0UL) - -#define USER_DS (TASK_SIZE) -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -#define uaccess_kernel() (get_fs() == KERNEL_DS) - #include /* diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index b5835325d44b..3222206cb3ea 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -96,12 +96,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 val; unsigned long flags; - /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is - * our gateway page, and causes no end of trouble... - */ - if (uaccess_kernel() && !uaddr) - return -EFAULT; - if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 46b1050640b8..cc07bcabf336 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -251,7 +251,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, DBG(1,"setup_rt_frame: frame %p info %p\n", frame, ksig->info); start = (unsigned long) frame; - if (start >= user_addr_max() - sigframe_size) + if (start >= TASK_SIZE_MAX - sigframe_size) return -EFAULT; #ifdef CONFIG_64BIT @@ -518,7 +518,7 @@ insert_restart_trampoline(struct pt_regs *regs) long err = 0; /* check that we don't exceed the stack */ - if (A(&usp[0]) >= user_addr_max() - 5 * sizeof(int)) + if (A(&usp[0]) >= TASK_SIZE_MAX - 5 * sizeof(int)) return; /* Setup a trampoline to restart the syscall diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index ea70a0e08321..468704ce8a1c 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -13,7 +13,7 @@ #include #include -#define get_user_space() (uaccess_kernel() ? 
0 : mfsp(3)) +#define get_user_space() (mfsp(3)) #define get_kernel_space() (0) /* Returns 0 for success, otherwise, returns number of bytes not transferred. */ diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 602149f3957f..9200bc04701c 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -58,7 +58,6 @@ config SPARC32 select HAVE_UID16 select OLD_SIGACTION select ZONE_DMA - select SET_FS config SPARC64 def_bool 64BIT diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h index 647bf0ac7beb..b26c35336b51 100644 --- a/arch/sparc/include/asm/processor_32.h +++ b/arch/sparc/include/asm/processor_32.h @@ -32,10 +32,6 @@ struct fpq { }; #endif -typedef struct { - int seg; -} mm_segment_t; - /* The Sparc processor specific thread struct. */ struct thread_struct { struct pt_regs *kregs; @@ -50,11 +46,9 @@ struct thread_struct { unsigned long fsr; unsigned long fpqdepth; struct fpq fpqueue[16]; - mm_segment_t current_ds; }; #define INIT_THREAD { \ - .current_ds = KERNEL_DS, \ .kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \ } diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 367747116260..9fd6c53644b6 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -12,19 +12,6 @@ #include #include - -/* Sparc is not segmented, however we need to be able to fool access_ok() - * when doing system calls from kernel mode legitimately. - * - * "For historical reasons, these macros are grossly misnamed." -Linus - */ - -#define KERNEL_DS ((mm_segment_t) { 0 }) -#define USER_DS ((mm_segment_t) { -1 }) - -#define get_fs() (current->thread.current_ds) -#define set_fs(val) ((current->thread.current_ds) = (val)) - #include /* Uh, these should become the main single-value transfer routines.. diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 2dc0bf9fe62e..88c0c14aaff0 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -300,7 +300,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, extern int nwindows; unsigned long psr; memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ); - p->thread.current_ds = KERNEL_DS; ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8); childregs->u_regs[UREG_G1] = sp; /* function */ childregs->u_regs[UREG_G2] = arg; @@ -311,7 +310,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, } memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); childregs->u_regs[UREG_FP] = sp; - p->thread.current_ds = USER_DS; ti->kpc = (((unsigned long) ret_from_fork) - 0x8); ti->kpsr = current->thread.fork_kpsr | PSR_PIL; ti->kwim = current->thread.fork_kwim; diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 8ac599aa6d99..09f7616a0b46 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -40,7 +40,6 @@ config XTENSA select IRQ_DOMAIN select MODULES_USE_ELF_RELA select PERF_USE_VMALLOC - select SET_FS select TRACE_IRQFLAGS_SUPPORT select VIRT_TO_BUS help diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h index 7f6cf4151843..7cec869136e3 100644 --- a/arch/xtensa/include/asm/asm-uaccess.h +++ b/arch/xtensa/include/asm/asm-uaccess.h @@ -23,76 +23,6 @@ #include #include -/* - * These assembly macros mirror the C macros in asm/uaccess.h. They - * should always have identical functionality. See - * arch/xtensa/kernel/sys.S for usage. 
- */ - -#define KERNEL_DS 0 -#define USER_DS 1 - -/* - * get_fs reads current->thread.current_ds into a register. - * On Entry: - * anything - * stack - * On Exit: - * contains current->thread.current_ds - */ - .macro get_fs ad, sp - GET_CURRENT(\ad,\sp) -#if THREAD_CURRENT_DS > 1020 - addi \ad, \ad, TASK_THREAD - l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD -#else - l32i \ad, \ad, THREAD_CURRENT_DS -#endif - .endm - -/* - * set_fs sets current->thread.current_ds to some value. - * On Entry: - * anything (temp register) - * value to write - * stack - * On Exit: - * destroyed (actually, current) - * preserved, value to write - */ - .macro set_fs at, av, sp - GET_CURRENT(\at,\sp) - s32i \av, \at, THREAD_CURRENT_DS - .endm - -/* - * kernel_ok determines whether we should bypass addr/size checking. - * See the equivalent C-macro version below for clarity. - * On success, kernel_ok branches to a label indicated by parameter - * . This implies that the macro falls through to the next - * insruction on an error. - * - * Note that while this macro can be used independently, we designed - * in for optimal use in the access_ok macro below (i.e., we fall - * through on error). - * - * On Entry: - * anything (temp register) - * label to branch to on success; implies - * fall-through macro on error - * stack pointer - * On Exit: - * destroyed (actually, current->thread.current_ds) - */ - -#if ((KERNEL_DS != 0) || (USER_DS == 0)) -# error Assembly macro kernel_ok fails -#endif - .macro kernel_ok at, sp, success - get_fs \at, \sp - beqz \at, \success - .endm - /* * user_ok determines whether the access to user-space memory is allowed. * See the equivalent C-macro version below for clarity. @@ -147,7 +77,6 @@ * destroyed */ .macro access_ok aa, as, at, sp, error - kernel_ok \at, \sp, .Laccess_ok_\@ user_ok \aa, \as, \at, \error .Laccess_ok_\@: .endm diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 37d3e9887fe7..abad7c3df46f 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -152,18 +152,12 @@ */ #define SPILL_SLOT_CALL12(sp, reg) (*(((unsigned long *)(sp)) - 16 + (reg))) -typedef struct { - unsigned long seg; -} mm_segment_t; - struct thread_struct { /* kernel's return address and stack pointer for context switching */ unsigned long ra; /* kernel's a0: return address and window call size */ unsigned long sp; /* kernel's a1: stack pointer */ - mm_segment_t current_ds; /* see uaccess.h for example uses */ - /* struct xtensa_cpuinfo info; */ unsigned long bad_vaddr; /* last user fault */ @@ -186,7 +180,6 @@ struct thread_struct { { \ ra: 0, \ sp: sizeof(init_stack) + (long) &init_stack, \ - current_ds: {0}, \ /*info: {0}, */ \ bad_vaddr: 0, \ bad_uaddr: 0, \ diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index a312333a9add..f6fcbba1d02f 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -52,8 +52,6 @@ struct thread_info { __u32 cpu; /* current CPU */ __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/ - mm_segment_t addr_limit; /* thread address space */ - unsigned long cpenable; #if XCHAL_HAVE_EXCLUSIVE /* result of the most recent exclusive store */ @@ -81,7 +79,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } /* how to get the thread information struct from C */ diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 
0edd9e4b23d0..56aec6d504fe 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -19,22 +19,6 @@ #include #include #include - -/* - * The fs value determines whether argument validity checking should - * be performed or not. If get_fs() == USER_DS, checking is - * performed, with get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons (Data Segment Register?), these macros are - * grossly misnamed. - */ - -#define KERNEL_DS ((mm_segment_t) { 0 }) -#define USER_DS ((mm_segment_t) { 1 }) - -#define get_fs() (current->thread.current_ds) -#define set_fs(val) (current->thread.current_ds = (val)) - #include /* diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index dc5c83cad9be..f1fd1390d069 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -87,7 +87,6 @@ int main(void) OFFSET(TI_STSTUS, thread_info, status); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); - OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); /* struct thread_info (offset from start_struct) */ DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra)); @@ -108,8 +107,6 @@ int main(void) #endif DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); - DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \ - thread.current_ds)); /* struct mm_struct */ DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users)); diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 614adb510dbd..2a918aeb0af1 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -747,7 +747,7 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer, * copied from, so it's unsafe to allow this with elevated * privileges (e.g. from a setuid binary) or via kernel_write(). */ - if (file->f_cred != current_cred() || uaccess_kernel()) { + if (file->f_cred != current_cred()) { pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n", task_tgid_vnr(current), current->comm); ret = -EACCES; diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 6b43e97bd417..aaa2376b9d34 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -224,11 +224,6 @@ static int sg_check_file_access(struct file *filp, const char *caller) caller, task_tgid_vnr(current), current->comm); return -EPERM; } - if (uaccess_kernel()) { - pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n", - caller, task_tgid_vnr(current), current->comm); - return -EACCES; - } return 0; } diff --git a/fs/exec.c b/fs/exec.c index 79f2c9483302..bc68a0c089ac 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1303,12 +1303,6 @@ int begin_new_exec(struct linux_binprm * bprm) if (retval) goto out_unlock; - /* - * Ensure that the uaccess routines can actually operate on userspace - * pointers: - */ - force_uaccess_begin(); - if (me->flags & PF_KTHREAD) free_kthread_struct(me); me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h index d38cc5dad65b..2866ae61b1cd 100644 --- a/include/asm-generic/access_ok.h +++ b/include/asm-generic/access_ok.h @@ -16,18 +16,6 @@ #define TASK_SIZE_MAX TASK_SIZE #endif -#ifndef uaccess_kernel -#ifdef CONFIG_SET_FS -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -#else -#define uaccess_kernel() (0) -#endif -#endif - -#ifndef user_addr_max -#define user_addr_max() (uaccess_kernel() ? 
~0UL : TASK_SIZE_MAX) -#endif - #ifndef __access_ok /* * 'size' is a compile-time constant for most callers, so optimize for @@ -42,7 +30,7 @@ */ static inline int __access_ok(const void __user *ptr, unsigned long size) { - unsigned long limit = user_addr_max(); + unsigned long limit = TASK_SIZE_MAX; unsigned long addr = (unsigned long)ptr; if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) || diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index ebc685dc8d74..a5be9e61a2a2 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -8,6 +8,7 @@ * address space, e.g. all NOMMU machines. */ #include +#include #ifdef CONFIG_UACCESS_MEMCPY #include @@ -94,30 +95,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) #define INLINE_COPY_TO_USER #endif /* CONFIG_UACCESS_MEMCPY */ -#ifdef CONFIG_SET_FS -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) - -#ifndef KERNEL_DS -#define KERNEL_DS MAKE_MM_SEG(~0UL) -#endif - -#ifndef USER_DS -#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) -#endif - -#ifndef get_fs -#define get_fs() (current_thread_info()->addr_limit) - -static inline void set_fs(mm_segment_t fs) -{ - current_thread_info()->addr_limit = fs; -} -#endif - -#endif /* CONFIG_SET_FS */ - -#include - /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 819c0cb00b6d..a34b0f9a9972 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -290,10 +290,6 @@ static inline void addr_limit_user_check(void) return; #endif - if (CHECK_DATA_CORRUPTION(uaccess_kernel(), - "Invalid address limit on user-mode return")) - force_sig(SIGKILL); - #ifdef TIF_FSCHECK clear_thread_flag(TIF_FSCHECK); #endif diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 2c31667e62e0..2421a41f3a8e 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -10,39 +10,6 @@ #include -#ifdef CONFIG_SET_FS -/* - * Force the uaccess routines to be wired up for actual userspace access, - * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone - * using force_uaccess_end below. 
- */ -static inline mm_segment_t force_uaccess_begin(void) -{ - mm_segment_t fs = get_fs(); - - set_fs(USER_DS); - return fs; -} - -static inline void force_uaccess_end(mm_segment_t oldfs) -{ - set_fs(oldfs); -} -#else /* CONFIG_SET_FS */ -typedef struct { - /* empty dummy */ -} mm_segment_t; - -static inline mm_segment_t force_uaccess_begin(void) -{ - return (mm_segment_t) { }; -} - -static inline void force_uaccess_end(mm_segment_t oldfs) -{ -} -#endif /* CONFIG_SET_FS */ - /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and diff --git a/include/rdma/ib.h b/include/rdma/ib.h index 83139b9ce409..f7c185ff7a11 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -75,7 +75,7 @@ struct sockaddr_ib { */ static inline bool ib_safe_file_access(struct file *filp) { - return filp->f_cred == current_cred() && !uaccess_kernel(); + return filp->f_cred == current_cred(); } #endif /* _RDMA_IB_H */ diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 58cbe357fb2b..1273be84392c 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -209,17 +209,13 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, } if (regs) { - mm_segment_t fs; - if (crosstask) goto exit_put; if (add_mark) perf_callchain_store_context(&ctx, PERF_CONTEXT_USER); - fs = force_uaccess_begin(); perf_callchain_user(&ctx, regs); - force_uaccess_end(fs); } } diff --git a/kernel/events/core.c b/kernel/events/core.c index 57c7197838db..11ca7303d6df 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6746,7 +6746,6 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, unsigned long sp; unsigned int rem; u64 dyn_size; - mm_segment_t fs; /* * We dump: @@ -6764,9 +6763,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, /* Data. */ sp = perf_user_stack_pointer(regs); - fs = force_uaccess_begin(); rem = __output_copy_user(handle, (void *) sp, dump_size); - force_uaccess_end(fs); dyn_size = dump_size - rem; perf_output_skip(handle, rem); diff --git a/kernel/exit.c b/kernel/exit.c index b00a25bb4ab9..0884a75bc2f8 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -737,20 +737,6 @@ void __noreturn do_exit(long code) WARN_ON(blk_needs_flush_plug(tsk)); - /* - * If do_dead is called because this processes oopsed, it's possible - * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before - * continuing. Amongst other possible reasons, this is to prevent - * mm_release()->clear_child_tid() from writing to a user-controlled - * kernel address. - * - * On uptodate architectures force_uaccess_begin is a noop. On - * architectures that still have set_fs/get_fs in addition to handling - * oopses handles kernel threads that run as set_fs(KERNEL_DS) by - * default. 
- */ - force_uaccess_begin(); - kcov_task_exit(tsk); coredump_task_exit(tsk); diff --git a/kernel/kthread.c b/kernel/kthread.c index 38c6dd822da8..16c2275d4b50 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -55,7 +55,6 @@ struct kthread { int result; int (*threadfn)(void *); void *data; - mm_segment_t oldfs; struct completion parked; struct completion exited; #ifdef CONFIG_BLK_CGROUP @@ -1441,8 +1440,6 @@ void kthread_use_mm(struct mm_struct *mm) mmdrop(active_mm); else smp_mb(); - - to_kthread(tsk)->oldfs = force_uaccess_begin(); } EXPORT_SYMBOL_GPL(kthread_use_mm); @@ -1457,8 +1454,6 @@ void kthread_unuse_mm(struct mm_struct *mm) WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); WARN_ON_ONCE(!tsk->mm); - force_uaccess_end(to_kthread(tsk)->oldfs); - task_lock(tsk); /* * When a kthread stops operating on an address space, the loop diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index 9c625257023d..9ed5ce989415 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c @@ -226,15 +226,12 @@ unsigned int stack_trace_save_user(unsigned long *store, unsigned int size) .store = store, .size = size, }; - mm_segment_t fs; /* Trace user stack if not a kernel thread */ if (current->flags & PF_KTHREAD) return 0; - fs = force_uaccess_begin(); arch_stack_walk_user(consume_entry, &c, task_pt_regs(current)); - force_uaccess_end(fs); return c.len; } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 21aa30644219..8115fff17018 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -332,8 +332,6 @@ BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, if (unlikely(in_interrupt() || current->flags & (PF_KTHREAD | PF_EXITING))) return -EPERM; - if (unlikely(uaccess_kernel())) - return -EPERM; if (unlikely(!nmi_uaccess_okay())) return -EPERM; @@ -835,8 +833,6 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) */ if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) return -EPERM; - if (unlikely(uaccess_kernel())) - return -EPERM; if (unlikely(!nmi_uaccess_okay())) return -EPERM; diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index 122d8d0e253c..08fc72d3ed16 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -120,7 +120,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count) if (unlikely(count <= 0)) return 0; - max_addr = user_addr_max(); + max_addr = TASK_SIZE_MAX; src_addr = (unsigned long)untagged_addr(src); if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 1616710b8a82..bffa0ebf9f8b 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -96,7 +96,7 @@ long strnlen_user(const char __user *str, long count) if (unlikely(count <= 0)) return 0; - max_addr = user_addr_max(); + max_addr = TASK_SIZE_MAX; src_addr = (unsigned long)untagged_addr(str); if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; diff --git a/mm/maccess.c b/mm/maccess.c index cbd1b3959af2..106820b33a2b 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -113,14 +113,11 @@ Efault: long copy_from_user_nofault(void *dst, const void __user *src, size_t size) { long ret = -EFAULT; - mm_segment_t old_fs = force_uaccess_begin(); - if (access_ok(src, size)) { pagefault_disable(); ret = __copy_from_user_inatomic(dst, src, size); pagefault_enable(); } - force_uaccess_end(old_fs); if (ret) return -EFAULT; @@ -140,14 +137,12 @@ EXPORT_SYMBOL_GPL(copy_from_user_nofault); long copy_to_user_nofault(void 
__user *dst, const void *src, size_t size) { long ret = -EFAULT; - mm_segment_t old_fs = force_uaccess_begin(); if (access_ok(dst, size)) { pagefault_disable(); ret = __copy_to_user_inatomic(dst, src, size); pagefault_enable(); } - force_uaccess_end(old_fs); if (ret) return -EFAULT; @@ -176,17 +171,14 @@ EXPORT_SYMBOL_GPL(copy_to_user_nofault); long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count) { - mm_segment_t old_fs; long ret; if (unlikely(count <= 0)) return 0; - old_fs = force_uaccess_begin(); pagefault_disable(); ret = strncpy_from_user(dst, unsafe_addr, count); pagefault_enable(); - force_uaccess_end(old_fs); if (ret >= count) { ret = count; @@ -216,14 +208,11 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, */ long strnlen_user_nofault(const void __user *unsafe_addr, long count) { - mm_segment_t old_fs; int ret; - old_fs = force_uaccess_begin(); pagefault_disable(); ret = strnlen_user(unsafe_addr, count); pagefault_enable(); - force_uaccess_end(old_fs); return ret; } diff --git a/mm/memory.c b/mm/memory.c index c125c4969913..9a6ebf68a846 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5256,14 +5256,6 @@ void print_vma_addr(char *prefix, unsigned long ip) #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) void __might_fault(const char *file, int line) { - /* - * Some code (nfs/sunrpc) uses socket ops on kernel memory while - * holding the mmap_lock, this is safe because kernel memory doesn't - * get paged out, therefore we'll never actually fault, and the - * below annotations will generate false positives. - */ - if (uaccess_kernel()) - return; if (pagefault_disabled()) return; __might_sleep(file, line); diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 51a941b56ec3..422ec6e7ccff 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -70,7 +70,7 @@ static int bpfilter_process_sockopt(struct sock *sk, int optname, .addr = (uintptr_t)optval.user, .len = optlen, }; - if (uaccess_kernel() || sockptr_is_kernel(optval)) { + if (sockptr_is_kernel(optval)) { pr_err("kernel access not supported\n"); return -EFAULT; } -- cgit v1.2.3-71-gd317 From 2c861b73a23b1237bdd054b6023bb27f6747c2b9 Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Sat, 19 Feb 2022 16:28:13 +0100 Subject: math64: New DIV_U64_ROUND_CLOSEST helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide DIV_U64_ROUND_CLOSEST helper which uses div_u64 to perform division rounded to the closest integer using unsigned 64bit dividend and unsigned 32bit divisor. 
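For example (an illustrative sketch, not part of this patch; the variable names are made up), rounding a 64-bit nanosecond count to the nearest millisecond:

	u64 ns = 1500000;	/* 1.5 ms */
	u64 ms = DIV_U64_ROUND_CLOSEST(ns, NSEC_PER_MSEC);	/* rounds up to 2 */

Unlike DIV64_U64_ROUND_CLOSEST, the divisor here must fit in 32 bits, which lets the helper use the cheaper div_u64() instead of div64_u64().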
Reviewed-by: Marek Behún Signed-off-by: Pali Rohár Signed-off-by: Marek Behún Link: https://lore.kernel.org/r/20220219152818.4319-2-kabel@kernel.org Signed-off-by: Greg Kroah-Hartman --- include/linux/math64.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/math64.h b/include/linux/math64.h index 2928f03d6d46..a14f40de1dca 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -300,6 +300,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); #define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \ ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); }) +/* + * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer + * @dividend: unsigned 64bit dividend + * @divisor: unsigned 32bit divisor + * + * Divide unsigned 64bit dividend by unsigned 32bit divisor + * and round to closest integer. + * + * Return: dividend / divisor rounded to nearest integer + */ +#define DIV_U64_ROUND_CLOSEST(dividend, divisor) \ + ({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); }) + /* * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer * @dividend: signed 64bit dividend -- cgit v1.2.3-71-gd317 From 085a884434f3e3b08349a0ba0904f9f561739d57 Mon Sep 17 00:00:00 2001 From: Richard Gong Date: Wed, 23 Feb 2022 08:49:08 -0600 Subject: firmware: stratix10-svc: extend SVC driver to get the firmware version Extend Intel service layer driver to get the firmware version running at FPGA device. Therefore FPGA manager driver, one of Intel service layer driver's client, can decide whether to handle the newly added bitstream authentication function based on the retrieved firmware version. Link: https://lore.kernel.org/lkml/1617114785-22211-2-git-send-email-richard.gong@linux.intel.com Acked-by: Moritz Fischr Signed-off-by: Richard Gong Signed-off-by: Dinh Nguyen Link: https://lore.kernel.org/r/20220223144908.399522-2-dinguyen@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/stratix10-svc.c | 9 ++++++++- include/linux/firmware/intel/stratix10-smc.h | 21 +++++++++++++++++++-- include/linux/firmware/intel/stratix10-svc-client.h | 4 ++++ 3 files changed, 31 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 29c0a616b317..4bd57a908efe 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -306,6 +306,7 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, break; case COMMAND_RSU_RETRY: case COMMAND_RSU_MAX_RETRY: + case COMMAND_FIRMWARE_VERSION: cb_data->status = BIT(SVC_STATUS_OK); cb_data->kaddr1 = &res.a1; break; @@ -422,6 +423,11 @@ static int svc_normal_to_secure_thread(void *data) a1 = 0; a2 = 0; break; + case COMMAND_FIRMWARE_VERSION: + a0 = INTEL_SIP_SMC_FIRMWARE_VERSION; + a1 = 0; + a2 = 0; + break; default: pr_warn("it shouldn't happen\n"); break; @@ -491,7 +497,8 @@ static int svc_normal_to_secure_thread(void *data) */ if ((pdata->command == COMMAND_RSU_RETRY) || (pdata->command == COMMAND_RSU_MAX_RETRY) || - (pdata->command == COMMAND_RSU_NOTIFY)) { + (pdata->command == COMMAND_RSU_NOTIFY) || + (pdata->command == COMMAND_FIRMWARE_VERSION)) { cbdata->status = BIT(SVC_STATUS_NO_SUPPORT); cbdata->kaddr1 = NULL; diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index c3e5ab014caf..aad497a9ad8b 100644 --- 
a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -321,8 +321,6 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_ECC_DBE \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE) -#endif - /** * Request INTEL_SIP_SMC_RSU_NOTIFY * @@ -404,3 +402,22 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18 #define INTEL_SIP_SMC_RSU_MAX_RETRY \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY) + +/** + * Request INTEL_SIP_SMC_FIRMWARE_VERSION + * + * Sync call used to query the version of running firmware + * + * Call register usage: + * a0 INTEL_SIP_SMC_FIRMWARE_VERSION + * a1-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR + * a1 running firmware version + */ +#define INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION 31 +#define INTEL_SIP_SMC_FIRMWARE_VERSION \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION) + +#endif diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index 19781b0f6429..18c1841fdb1f 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -104,6 +104,9 @@ struct stratix10_svc_chan; * * @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status * is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FIRMWARE_VERSION: query running firmware version, return status + * is SVC_STATUS_OK or SVC_STATUS_ERROR */ enum stratix10_svc_command_code { COMMAND_NOOP = 0, @@ -117,6 +120,7 @@ enum stratix10_svc_command_code { COMMAND_RSU_RETRY, COMMAND_RSU_MAX_RETRY, COMMAND_RSU_DCMF_VERSION, + COMMAND_FIRMWARE_VERSION, }; /** -- cgit v1.2.3-71-gd317 From f1d0821bf37ba3cecee0fd1e9ae72a943a69d01d Mon Sep 17 00:00:00 2001 From: Ronak Jain Date: Wed, 9 Feb 2022 00:27:07 -0800 Subject: firmware: xilinx: Add support for runtime features Add support for runtime features by using an IOCTL call. The features can be enabled or disabled from the firmware as well as the features can be configured at runtime by querying IOCTL_SET_FEATURE_CONFIG id. Similarly, the user can get the configured values of features by querying IOCTL_GET_FEATURE_CONFIG id. Acked-by: Michal Simek Signed-off-by: Ronak Jain Link: https://lore.kernel.org/r/20220209082709.32378-2-ronak.jain@xilinx.com Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/xilinx/zynqmp.c | 27 +++++++++++++++++++++++++++ include/linux/firmware/xlnx-zynqmp.h | 25 +++++++++++++++++++++++++ 2 files changed, 52 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 450c5f6a1cbf..0fa6cae4969d 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -1156,6 +1156,33 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype) 0, 0, NULL); } +/** + * zynqmp_pm_set_feature_config - PM call to request IOCTL for feature config + * @id: The config ID of the feature to be configured + * @value: The config value of the feature to be configured + * + * Return: Returns 0 on success or error value on failure. 
+ */ +int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value) +{ + return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_FEATURE_CONFIG, + id, value, NULL); +} + +/** + * zynqmp_pm_get_feature_config - PM call to get value of configured feature + * @id: The config id of the feature to be queried + * @payload: Returned value array + * + * Return: Returns 0 on success or error value on failure. + */ +int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, + u32 *payload) +{ + return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_FEATURE_CONFIG, + id, 0, payload); +} + /** * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope * @subtype: Shutdown subtype diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 907cb01890cf..cf557fbeb8c7 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -143,6 +143,9 @@ enum pm_ioctl_id { IOCTL_OSPI_MUX_SELECT = 21, /* Register SGI to ATF */ IOCTL_REGISTER_SGI = 25, + /* Runtime feature configuration */ + IOCTL_SET_FEATURE_CONFIG = 26, + IOCTL_GET_FEATURE_CONFIG = 27, }; enum pm_query_id { @@ -376,6 +379,14 @@ enum ospi_mux_select_type { PM_OSPI_MUX_SEL_LINEAR = 1, }; +enum pm_feature_config_id { + PM_FEATURE_INVALID = 0, + PM_FEATURE_OVERTEMP_STATUS = 1, + PM_FEATURE_OVERTEMP_VALUE = 2, + PM_FEATURE_EXTWDT_STATUS = 3, + PM_FEATURE_EXTWDT_VALUE = 4, +}; + /** * struct zynqmp_pm_query_data - PM query data * @qid: query ID @@ -447,6 +458,8 @@ int zynqmp_pm_load_pdi(const u32 src, const u64 address); int zynqmp_pm_register_notifier(const u32 node, const u32 event, const u32 wake, const u32 enable); int zynqmp_pm_feature(const u32 api_id); +int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); +int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); #else static inline int zynqmp_pm_get_api_version(u32 *version) { @@ -689,6 +702,18 @@ static inline int zynqmp_pm_feature(const u32 api_id) { return -ENODEV; } + +static inline int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, + u32 value) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, + u32 *payload) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ -- cgit v1.2.3-71-gd317 From 2502960fba7e94e090112069694365295c32ccc5 Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Mon, 14 Feb 2022 14:07:57 +0800 Subject: component: Add common helper for compare/release functions The component requires the compare/release functions, there are so many copies in current kernel. Just define four common helpers for them. Cc: Greg Kroah-Hartman Cc: "Rafael J. 
Wysocki" Signed-off-by: Yong Wu Link: https://lore.kernel.org/r/20220214060819.7334-2-yong.wu@mediatek.com Signed-off-by: Greg Kroah-Hartman --- drivers/base/component.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/component.h | 6 +++++ 2 files changed, 64 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/component.c b/drivers/base/component.c index 34f9e0802719..5eadeac6c532 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -283,6 +284,63 @@ static void take_down_aggregate_device(struct aggregate_device *adev) } } +/** + * component_compare_of - A common component compare function for of_node + * @dev: component device + * @data: @compare_data from component_match_add_release() + * + * A common compare function when compare_data is device of_node. e.g. + * component_match_add_release(masterdev, &match, component_release_of, + * component_compare_of, component_dev_of_node) + */ +int component_compare_of(struct device *dev, void *data) +{ + return device_match_of_node(dev, data); +} +EXPORT_SYMBOL_GPL(component_compare_of); + +/** + * component_release_of - A common component release function for of_node + * @dev: component device + * @data: @compare_data from component_match_add_release() + * + * About the example, Please see component_compare_of(). + */ +void component_release_of(struct device *dev, void *data) +{ + of_node_put(data); +} +EXPORT_SYMBOL_GPL(component_release_of); + +/** + * component_compare_dev - A common component compare function for dev + * @dev: component device + * @data: @compare_data from component_match_add_release() + * + * A common compare function when compare_data is struce device. e.g. + * component_match_add(masterdev, &match, component_compare_dev, component_dev) + */ +int component_compare_dev(struct device *dev, void *data) +{ + return dev == data; +} +EXPORT_SYMBOL_GPL(component_compare_dev); + +/** + * component_compare_dev_name - A common component compare function for device name + * @dev: component device + * @data: @compare_data from component_match_add_release() + * + * A common compare function when compare_data is device name string. e.g. + * component_match_add(masterdev, &match, component_compare_dev_name, + * "component_dev_name") + */ +int component_compare_dev_name(struct device *dev, void *data) +{ + return device_match_name(dev, data); +} +EXPORT_SYMBOL_GPL(component_compare_dev_name); + static void devm_component_match_release(struct device *parent, void *res) { struct component_match *match = res; diff --git a/include/linux/component.h b/include/linux/component.h index 7012569c6546..df4aa75c9e7c 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -82,6 +82,12 @@ struct component_master_ops { void (*unbind)(struct device *master); }; +/* A set helper functions for component compare/release */ +int component_compare_of(struct device *dev, void *data); +void component_release_of(struct device *dev, void *data); +int component_compare_dev(struct device *dev, void *data); +int component_compare_dev_name(struct device *dev, void *data); + void component_master_del(struct device *, const struct component_master_ops *); -- cgit v1.2.3-71-gd317 From 9584e7263e9ebcd94b184dc3efc847355a624220 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 13 Jan 2022 16:48:54 +0200 Subject: ARM: at91: PM: add cpu idle support for sama7g5 Add CPU idle support for SAMA7G5. 
Support will make use of PMC_CPU_RATIO register to divide the CPU clock by 16 before switching it to idle and use automatic self-refresh option of DDR controller. Signed-off-by: Claudiu Beznea Acked-by: Stephen Boyd Signed-off-by: Nicolas Ferre Link: https://lore.kernel.org/r/20220113144900.906370-5-claudiu.beznea@microchip.com --- arch/arm/mach-at91/pm.c | 27 ++++++++++++++++++++++++++- include/linux/clk/at91_pmc.h | 4 ++++ include/soc/at91/sama7-ddr.h | 1 + 3 files changed, 31 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index dd6f4ce3f766..0fd609e26615 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -605,6 +605,30 @@ static void at91sam9_sdram_standby(void) at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1); } +static void sama7g5_standby(void) +{ + int pwrtmg, ratio; + + pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL); + ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO); + + /* + * Place RAM into self-refresh after a maximum idle clocks. The maximum + * idle clocks is configured by bootloader in + * UDDRC_PWRMGT.SELFREF_TO_X32. + */ + writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN, + soc_pm.data.ramc[0] + UDDRC_PWRCTL); + /* Divide CPU clock by 16. */ + writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO); + + cpu_do_idle(); + + /* Restore previous configuration. */ + writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO); + writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL); +} + struct ramc_info { void (*idle)(void); unsigned int memctrl; @@ -615,6 +639,7 @@ static const struct ramc_info ramc_infos[] __initconst = { { .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC}, { .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR}, { .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR}, + { .idle = sama7g5_standby, }, }; static const struct of_device_id ramc_ids[] __initconst = { @@ -622,7 +647,7 @@ static const struct of_device_id ramc_ids[] __initconst = { { .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] }, { .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] }, { .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] }, - { .compatible = "microchip,sama7g5-uddrc", }, + { .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], }, { /*sentinel*/ } }; diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index ccb3f034bfa9..3484309b59bf 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -78,6 +78,10 @@ #define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */ #define AT91_CKGR_PLLAR 0x28 /* PLL A Register */ + +#define AT91_PMC_RATIO 0x2c /* Processor clock ratio register [SAMA7G5 only] */ +#define AT91_PMC_RATIO_RATIO (0xf) /* CPU clock ratio. 
*/ + #define AT91_CKGR_PLLBR 0x2c /* PLL B Register */ #define AT91_PMC_DIV (0xff << 0) /* Divider */ #define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */ diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h index fee1b11bddca..9e17247474fa 100644 --- a/include/soc/at91/sama7-ddr.h +++ b/include/soc/at91/sama7-ddr.h @@ -53,6 +53,7 @@ #define UDDRC_STAT_OPMODE_MSK (0x7 << 0) /* Operating mode mask */ #define UDDRC_PWRCTL (0x30) /* UDDRC Low Power Control Register */ +#define UDDRC_PWRCTL_SELFREF_EN (1 << 0) /* Automatic self-refresh */ #define UDDRC_PWRCTL_SELFREF_SW (1 << 5) /* Software self-refresh */ #define UDDRC_DFIMISC (0x1B0) /* UDDRC DFI Miscellaneous Control Register */ -- cgit v1.2.3-71-gd317 From 8b4195cd6dc3f1f0ab457d23d21e9f72fde0760a Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Wed, 23 Feb 2022 14:43:47 +0100 Subject: mtd: spi-nor: move all xilinx specifics into xilinx.c Mechanically move all the xilinx functions to its own module. Then register the new flash specific ready() function. Signed-off-by: Michael Walle Signed-off-by: Tudor Ambarus Reviewed-by: Pratyush Yadav Link: https://lore.kernel.org/r/20220223134358.1914798-22-michael@walle.cc --- drivers/mtd/spi-nor/core.c | 64 +------------------------------------- drivers/mtd/spi-nor/core.h | 18 ----------- drivers/mtd/spi-nor/xilinx.c | 73 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/mtd/spi-nor.h | 9 ------ 4 files changed, 74 insertions(+), 90 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 8481272533a3..ae1560250c48 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -598,57 +598,6 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear) return ret; } -/** - * spi_nor_xread_sr() - Read the Status Register on S3AN flashes. - * @nor: pointer to 'struct spi_nor'. - * @sr: pointer to a DMA-able buffer where the value of the - * Status Register will be written. - * - * Return: 0 on success, -errno otherwise. - */ -int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr) -{ - int ret; - - if (nor->spimem) { - struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0), - SPI_MEM_OP_NO_ADDR, - SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, sr, 0)); - - spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); - - ret = spi_mem_exec_op(nor->spimem, &op); - } else { - ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr, - 1); - } - - if (ret) - dev_dbg(nor->dev, "error %d reading XRDSR\n", ret); - - return ret; -} - -/** - * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if - * the flash is ready for new commands. - * @nor: pointer to 'struct spi_nor'. - * - * Return: 1 if ready, 0 if not ready, -errno on errors. - */ -static int spi_nor_xsr_ready(struct spi_nor *nor) -{ - int ret; - - ret = spi_nor_xread_sr(nor, nor->bouncebuf); - if (ret) - return ret; - - return !!(nor->bouncebuf[0] & XSR_RDY); -} - /** * spi_nor_clear_sr() - Clear the Status Register. * @nor: pointer to 'struct spi_nor'. @@ -798,10 +747,7 @@ static int spi_nor_ready(struct spi_nor *nor) if (nor->params->ready) return nor->params->ready(nor); - if (nor->flags & SNOR_F_READY_XSR_RDY) - sr = spi_nor_xsr_ready(nor); - else - sr = spi_nor_sr_ready(nor); + sr = spi_nor_sr_ready(nor); if (sr < 0) return sr; fsr = nor->flags & SNOR_F_USE_FSR ? 
spi_nor_fsr_ready(nor) : 1; @@ -2677,14 +2623,6 @@ static void spi_nor_init_flags(struct spi_nor *nor) if (flags & USE_FSR) nor->flags |= SNOR_F_USE_FSR; - - /* - * Make sure the XSR_RDY flag is set before calling - * spi_nor_wait_till_ready(). Xilinx S3AN share MFR - * with Atmel SPI NOR. - */ - if (flags & SPI_NOR_XSR_RDY) - nor->flags |= SNOR_F_READY_XSR_RDY; } /** diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 3c37b46d60d5..fabc01ae9a81 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -15,7 +15,6 @@ enum spi_nor_option_flags { SNOR_F_USE_FSR = BIT(0), SNOR_F_HAS_SR_TB = BIT(1), SNOR_F_NO_OP_CHIP_ERASE = BIT(2), - SNOR_F_READY_XSR_RDY = BIT(3), SNOR_F_USE_CLSR = BIT(4), SNOR_F_BROKEN_RESET = BIT(5), SNOR_F_4B_OPCODES = BIT(6), @@ -351,8 +350,6 @@ struct spi_nor_fixups { * SPI_NOR_NO_FR: can't do fastread. * USE_CLSR: use CLSR command. * USE_FSR: use flag status register - * SPI_NOR_XSR_RDY: S3AN flashes have specific opcode to read the - * status register. * * @no_sfdp_flags: flags that indicate support that can be discovered via SFDP. * Used when SFDP tables are not defined in the flash. These @@ -405,7 +402,6 @@ struct flash_info { #define SPI_NOR_NO_FR BIT(8) #define USE_CLSR BIT(9) #define USE_FSR BIT(10) -#define SPI_NOR_XSR_RDY BIT(11) u8 no_sfdp_flags; #define SPI_NOR_SKIP_SFDP BIT(0) @@ -462,19 +458,6 @@ struct flash_info { .addr_width = (_addr_width), \ .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \ -#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \ - .id = { \ - ((_jedec_id) >> 16) & 0xff, \ - ((_jedec_id) >> 8) & 0xff, \ - (_jedec_id) & 0xff \ - }, \ - .id_len = 3, \ - .sector_size = (8 * (_page_size)), \ - .n_sectors = (_n_sectors), \ - .page_size = (_page_size), \ - .addr_width = 3, \ - .flags = SPI_NOR_NO_FR | SPI_NOR_XSR_RDY, - #define OTP_INFO(_len, _n_regions, _base, _offset) \ .otp_org = { \ .len = (_len), \ @@ -564,7 +547,6 @@ int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len); int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1); int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr); -int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr); ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf); ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c index 07dd11788df5..05c7fe843a7d 100644 --- a/drivers/mtd/spi-nor/xilinx.c +++ b/drivers/mtd/spi-nor/xilinx.c @@ -8,6 +8,27 @@ #include "core.h" +#define SPINOR_OP_XSE 0x50 /* Sector erase */ +#define SPINOR_OP_XPP 0x82 /* Page program */ +#define SPINOR_OP_XRDSR 0xd7 /* Read status register */ + +#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */ +#define XSR_RDY BIT(7) /* Ready */ + +#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \ + .id = { \ + ((_jedec_id) >> 16) & 0xff, \ + ((_jedec_id) >> 8) & 0xff, \ + (_jedec_id) & 0xff \ + }, \ + .id_len = 3, \ + .sector_size = (8 * (_page_size)), \ + .n_sectors = (_n_sectors), \ + .page_size = (_page_size), \ + .addr_width = 3, \ + .flags = SPI_NOR_NO_FR + +/* Xilinx S3AN share MFR with Atmel SPI NOR */ static const struct flash_info xilinx_nor_parts[] = { /* Xilinx S3AN Internal Flash */ { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) }, @@ -38,6 +59,57 @@ static u32 s3an_nor_convert_addr(struct spi_nor *nor, u32 addr) return page | offset; } +/** + * spi_nor_xread_sr() - Read the Status Register on S3AN flashes. + * @nor: pointer to 'struct spi_nor'. 
+ * @sr: pointer to a DMA-able buffer where the value of the + * Status Register will be written. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, sr, 0)); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr, + 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading XRDSR\n", ret); + + return ret; +} + +/** + * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if + * the flash is ready for new commands. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. + */ +static int spi_nor_xsr_ready(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_xread_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + return !!(nor->bouncebuf[0] & XSR_RDY); +} + static int xilinx_nor_setup(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps) { @@ -83,6 +155,7 @@ static int xilinx_nor_setup(struct spi_nor *nor, static void xilinx_nor_late_init(struct spi_nor *nor) { nor->params->setup = xilinx_nor_setup; + nor->params->ready = spi_nor_xsr_ready; } static const struct spi_nor_fixups xilinx_nor_fixups = { diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index fc90fce26e33..b44b05a6f934 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -86,15 +86,6 @@ #define SPINOR_OP_BP 0x02 /* Byte program */ #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ -/* Used for S3AN flashes only */ -#define SPINOR_OP_XSE 0x50 /* Sector erase */ -#define SPINOR_OP_XPP 0x82 /* Page program */ -#define SPINOR_OP_XRDSR 0xd7 /* Read status register */ - -#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */ -#define XSR_RDY BIT(7) /* Ready */ - - /* Used for Macronix and Winbond flashes. */ #define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ #define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ -- cgit v1.2.3-71-gd317 From c770abe52d81089a8b8ecd1fe42722e29bbab5f5 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Wed, 23 Feb 2022 14:43:50 +0100 Subject: mtd: spi-nor: move all micron-st specifics into micron-st.c The flag status register is only available on micron flashes. Move all the functions around that into the micron module. This is almost a mechanical move except for the spi_nor_fsr_ready() which now also checks the normal status register. Previously, this was done in spi_nor_ready(). Signed-off-by: Michael Walle Signed-off-by: Tudor Ambarus Tested-by: Pratyush Yadav # on mt35xu512aba, s28hs512t Reviewed-by: Pratyush Yadav Link: https://lore.kernel.org/r/20220223134358.1914798-25-michael@walle.cc --- drivers/mtd/spi-nor/core.c | 123 +----------------------------------- drivers/mtd/spi-nor/micron-st.c | 134 ++++++++++++++++++++++++++++++++++++++++ include/linux/mtd/spi-nor.h | 8 --- 3 files changed, 135 insertions(+), 130 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index ae1560250c48..5b56d718692b 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -412,50 +412,6 @@ int spi_nor_read_sr(struct spi_nor *nor, u8 *sr) return ret; } -/** - * spi_nor_read_fsr() - Read the Flag Status Register. 
- * @nor: pointer to 'struct spi_nor' - * @fsr: pointer to a DMA-able buffer where the value of the - * Flag Status Register will be written. Should be at least 2 - * bytes. - * - * Return: 0 on success, -errno otherwise. - */ -static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr) -{ - int ret; - - if (nor->spimem) { - struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0), - SPI_MEM_OP_NO_ADDR, - SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, fsr, 0)); - - if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { - op.addr.nbytes = nor->params->rdsr_addr_nbytes; - op.dummy.nbytes = nor->params->rdsr_dummy; - /* - * We don't want to read only one byte in DTR mode. So, - * read 2 and then discard the second byte. - */ - op.data.nbytes = 2; - } - - spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); - - ret = spi_mem_exec_op(nor->spimem, &op); - } else { - ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr, - 1); - } - - if (ret) - dev_dbg(nor->dev, "error %d reading FSR\n", ret); - - return ret; -} - /** * spi_nor_read_cr() - Read the Configuration Register using the * SPINOR_OP_RDCR (35h) command. @@ -664,75 +620,6 @@ int spi_nor_sr_ready(struct spi_nor *nor) return !(nor->bouncebuf[0] & SR_WIP); } -/** - * spi_nor_clear_fsr() - Clear the Flag Status Register. - * @nor: pointer to 'struct spi_nor'. - */ -static void spi_nor_clear_fsr(struct spi_nor *nor) -{ - int ret; - - if (nor->spimem) { - struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0), - SPI_MEM_OP_NO_ADDR, - SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_NO_DATA); - - spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); - - ret = spi_mem_exec_op(nor->spimem, &op); - } else { - ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR, - NULL, 0); - } - - if (ret) - dev_dbg(nor->dev, "error %d clearing FSR\n", ret); -} - -/** - * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is - * ready for new commands. - * @nor: pointer to 'struct spi_nor'. - * - * Return: 1 if ready, 0 if not ready, -errno on errors. - */ -static int spi_nor_fsr_ready(struct spi_nor *nor) -{ - int ret = spi_nor_read_fsr(nor, nor->bouncebuf); - - if (ret) - return ret; - - if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) { - if (nor->bouncebuf[0] & FSR_E_ERR) - dev_err(nor->dev, "Erase operation failed.\n"); - else - dev_err(nor->dev, "Program operation failed.\n"); - - if (nor->bouncebuf[0] & FSR_PT_ERR) - dev_err(nor->dev, - "Attempted to modify a protected sector.\n"); - - spi_nor_clear_fsr(nor); - - /* - * WEL bit remains set to one when an erase or page program - * error occurs. Issue a Write Disable command to protect - * against inadvertent writes that can possibly corrupt the - * contents of the memory. - */ - ret = spi_nor_write_disable(nor); - if (ret) - return ret; - - return -EIO; - } - - return !!(nor->bouncebuf[0] & FSR_READY); -} - /** * spi_nor_ready() - Query the flash to see if it is ready for new commands. * @nor: pointer to 'struct spi_nor'. @@ -741,19 +628,11 @@ static int spi_nor_fsr_ready(struct spi_nor *nor) */ static int spi_nor_ready(struct spi_nor *nor) { - int sr, fsr; - /* Flashes might override the standard routine. */ if (nor->params->ready) return nor->params->ready(nor); - sr = spi_nor_sr_ready(nor); - if (sr < 0) - return sr; - fsr = nor->flags & SNOR_F_USE_FSR ? 
spi_nor_fsr_ready(nor) : 1; - if (fsr < 0) - return fsr; - return sr && fsr; + return spi_nor_sr_ready(nor); } /** diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c index 7a68f2ad3ea1..e580830ed70f 100644 --- a/drivers/mtd/spi-nor/micron-st.c +++ b/drivers/mtd/spi-nor/micron-st.c @@ -8,6 +8,8 @@ #include "core.h" +#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ +#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ #define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */ #define SPINOR_OP_MT_RD_ANY_REG 0x85 /* Read volatile register */ #define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */ @@ -17,6 +19,12 @@ #define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */ #define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */ +/* Flag Status Register bits */ +#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ +#define FSR_E_ERR BIT(5) /* Erase operation status */ +#define FSR_P_ERR BIT(4) /* Program operation status */ +#define FSR_PT_ERR BIT(1) /* Protection error bit */ + static int micron_st_nor_octal_dtr_enable(struct spi_nor *nor, bool enable) { struct spi_mem_op op; @@ -273,6 +281,125 @@ static int micron_st_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable) return spi_nor_write_disable(nor); } +/** + * spi_nor_read_fsr() - Read the Flag Status Register. + * @nor: pointer to 'struct spi_nor' + * @fsr: pointer to a DMA-able buffer where the value of the + * Flag Status Register will be written. Should be at least 2 + * bytes. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, fsr, 0)); + + if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { + op.addr.nbytes = nor->params->rdsr_addr_nbytes; + op.dummy.nbytes = nor->params->rdsr_dummy; + /* + * We don't want to read only one byte in DTR mode. So, + * read 2 and then discard the second byte. + */ + op.data.nbytes = 2; + } + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr, + 1); + } + + if (ret) + dev_dbg(nor->dev, "error %d reading FSR\n", ret); + + return ret; +} + +/** + * spi_nor_clear_fsr() - Clear the Flag Status Register. + * @nor: pointer to 'struct spi_nor'. + */ +static void spi_nor_clear_fsr(struct spi_nor *nor) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d clearing FSR\n", ret); +} + +/** + * spi_nor_fsr_ready() - Query the Status Register as well as the Flag Status + * Register to see if the flash is ready for new commands. If there are any + * errors in the FSR clear them. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. 
+ */ +static int spi_nor_fsr_ready(struct spi_nor *nor) +{ + int sr_ready, ret; + + sr_ready = spi_nor_sr_ready(nor); + if (sr_ready < 0) + return sr_ready; + + ret = spi_nor_read_fsr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) { + if (nor->bouncebuf[0] & FSR_E_ERR) + dev_err(nor->dev, "Erase operation failed.\n"); + else + dev_err(nor->dev, "Program operation failed.\n"); + + if (nor->bouncebuf[0] & FSR_PT_ERR) + dev_err(nor->dev, + "Attempted to modify a protected sector.\n"); + + spi_nor_clear_fsr(nor); + + /* + * WEL bit remains set to one when an erase or page program + * error occurs. Issue a Write Disable command to protect + * against inadvertent writes that can possibly corrupt the + * contents of the memory. + */ + ret = spi_nor_write_disable(nor); + if (ret) + return ret; + + return -EIO; + } + + return sr_ready && !!(nor->bouncebuf[0] & FSR_READY); +} + static void micron_st_nor_default_init(struct spi_nor *nor) { nor->flags |= SNOR_F_HAS_LOCK; @@ -281,8 +408,15 @@ static void micron_st_nor_default_init(struct spi_nor *nor) nor->params->set_4byte_addr_mode = micron_st_nor_set_4byte_addr_mode; } +static void micron_st_nor_late_init(struct spi_nor *nor) +{ + if (nor->flags & SNOR_F_USE_FSR) + nor->params->ready = spi_nor_fsr_ready; +} + static const struct spi_nor_fixups micron_st_nor_fixups = { .default_init = micron_st_nor_default_init, + .late_init = micron_st_nor_late_init, }; const struct spi_nor_manufacturer spi_nor_micron = { diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index b44b05a6f934..4622251a79ff 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -47,8 +47,6 @@ #define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */ #define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ #define SPINOR_OP_RDCR 0x35 /* Read configuration register */ -#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ -#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ #define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ #define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */ #define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */ @@ -126,12 +124,6 @@ /* Enhanced Volatile Configuration Register bits */ #define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ -/* Flag Status Register bits */ -#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ -#define FSR_E_ERR BIT(5) /* Erase operation status */ -#define FSR_P_ERR BIT(4) /* Program operation status */ -#define FSR_PT_ERR BIT(1) /* Protection error bit */ - /* Status Register 2 bits. */ #define SR2_QUAD_EN_BIT1 BIT(1) #define SR2_LB1 BIT(3) /* Security Register Lock Bit 1 */ -- cgit v1.2.3-71-gd317 From 837d5181beef068c16bb8424c2c1571a7d5d7966 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Wed, 23 Feb 2022 14:43:54 +0100 Subject: mtd: spi-nor: move all spansion specifics into spansion.c The clear status register command is only available on spansion flashes. Move all the functions around that into the spansion module.
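As with the xilinx and micron conversions above, the flash specific ready() implementation ends up being registered from a late_init() fixup. For illustration only, a minimal sketch of that hook pattern with a hypothetical "acme" manufacturer module (the helpers it calls are the core ones shown in these patches, and spi_nor_read_sr()/SR_WIP is just a stand-in for whatever vendor-specific status check the real module would do):

	static int acme_nor_ready(struct spi_nor *nor)
	{
		int ret = spi_nor_read_sr(nor, nor->bouncebuf);

		if (ret)
			return ret;

		/* same convention as the core: 1 if ready, 0 if busy, -errno on error */
		return !(nor->bouncebuf[0] & SR_WIP);
	}

	static void acme_nor_late_init(struct spi_nor *nor)
	{
		nor->params->ready = acme_nor_ready;
	}

	static const struct spi_nor_fixups acme_nor_fixups = {
		.late_init = acme_nor_late_init,
	};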
Signed-off-by: Michael Walle Signed-off-by: Tudor Ambarus Tested-by: Pratyush Yadav # on mt35xu512aba, s28hs512t Reviewed-by: Pratyush Yadav Link: https://lore.kernel.org/r/20220223134358.1914798-29-michael@walle.cc --- drivers/mtd/spi-nor/core.c | 49 ----------------------------- drivers/mtd/spi-nor/spansion.c | 70 ++++++++++++++++++++++++++++++++++++++++++ include/linux/mtd/spi-nor.h | 1 - 3 files changed, 70 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index ac0faedebafe..e2b8b0a438ce 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -554,33 +554,6 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear) return ret; } -/** - * spi_nor_clear_sr() - Clear the Status Register. - * @nor: pointer to 'struct spi_nor'. - */ -static void spi_nor_clear_sr(struct spi_nor *nor) -{ - int ret; - - if (nor->spimem) { - struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0), - SPI_MEM_OP_NO_ADDR, - SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_NO_DATA); - - spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); - - ret = spi_mem_exec_op(nor->spimem, &op); - } else { - ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR, - NULL, 0); - } - - if (ret) - dev_dbg(nor->dev, "error %d clearing SR\n", ret); -} - /** * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready * for new commands. @@ -595,28 +568,6 @@ int spi_nor_sr_ready(struct spi_nor *nor) if (ret) return ret; - if (nor->flags & SNOR_F_USE_CLSR && - nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) { - if (nor->bouncebuf[0] & SR_E_ERR) - dev_err(nor->dev, "Erase Error occurred\n"); - else - dev_err(nor->dev, "Programming Error occurred\n"); - - spi_nor_clear_sr(nor); - - /* - * WEL bit remains set to one when an erase or page program - * error occurs. Issue a Write Disable command to protect - * against inadvertent writes that can possibly corrupt the - * contents of the memory. - */ - ret = spi_nor_write_disable(nor); - if (ret) - return ret; - - return -EIO; - } - return !(nor->bouncebuf[0] & SR_WIP); } diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 1a0e7214d9e5..dbafe97c2636 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -8,6 +8,7 @@ #include "core.h" +#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ #define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */ #define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */ #define SPINOR_REG_CYPRESS_CFR2V 0x00800003 @@ -294,6 +295,72 @@ static const struct flash_info spansion_nor_parts[] = { }, }; +/** + * spi_nor_clear_sr() - Clear the Status Register. + * @nor: pointer to 'struct spi_nor'. + */ +static void spi_nor_clear_sr(struct spi_nor *nor) +{ + int ret; + + if (nor->spimem) { + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_DATA); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + } else { + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR, + NULL, 0); + } + + if (ret) + dev_dbg(nor->dev, "error %d clearing SR\n", ret); +} + +/** + * spi_nor_sr_ready_and_clear() - Query the Status Register to see if the flash + * is ready for new commands and clear it if there are any errors. + * @nor: pointer to 'struct spi_nor'. + * + * Return: 1 if ready, 0 if not ready, -errno on errors. 
+ */ +static int spi_nor_sr_ready_and_clear(struct spi_nor *nor) +{ + int ret; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + if (nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) { + if (nor->bouncebuf[0] & SR_E_ERR) + dev_err(nor->dev, "Erase Error occurred\n"); + else + dev_err(nor->dev, "Programming Error occurred\n"); + + spi_nor_clear_sr(nor); + + /* + * WEL bit remains set to one when an erase or page program + * error occurs. Issue a Write Disable command to protect + * against inadvertent writes that can possibly corrupt the + * contents of the memory. + */ + ret = spi_nor_write_disable(nor); + if (ret) + return ret; + + return -EIO; + } + + return !(nor->bouncebuf[0] & SR_WIP); +} + static void spansion_nor_late_init(struct spi_nor *nor) { if (nor->params->size > SZ_16M) { @@ -302,6 +369,9 @@ static void spansion_nor_late_init(struct spi_nor *nor) nor->erase_opcode = SPINOR_OP_SE; nor->mtd.erasesize = nor->info->sector_size; } + + if (nor->flags & SNOR_F_USE_CLSR) + nor->params->ready = spi_nor_sr_ready_and_clear; } static const struct spi_nor_fixups spansion_nor_fixups = { diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 4622251a79ff..5e25a7b75ae2 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -90,7 +90,6 @@ /* Used for Spansion flashes only. */ #define SPINOR_OP_BRWR 0x17 /* Bank register write */ -#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ /* Used for Micron flashes only. */ #define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ -- cgit v1.2.3-71-gd317 From bc82c38a6933aab308387d4aca47e0a05de7b553 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 11 Feb 2022 08:10:18 +0100 Subject: tracing: Uninline trace_trigger_soft_disabled() partly On a powerpc32 build with CONFIG_CC_OPTIMISE_FOR_SIZE, the inline keyword is not honored and trace_trigger_soft_disabled() appears approx 50 times in vmlinux. Adding -Winline to the build, the following message appears: ./include/linux/trace_events.h:712:1: error: inlining failed in call to 'trace_trigger_soft_disabled': call is unlikely and code size would grow [-Werror=inline] That function is rather big for an inlined function:

c003df60 <trace_trigger_soft_disabled>:
c003df60: 94 21 ff f0   stwu    r1,-16(r1)
c003df64: 7c 08 02 a6   mflr    r0
c003df68: 90 01 00 14   stw     r0,20(r1)
c003df6c: bf c1 00 08   stmw    r30,8(r1)
c003df70: 83 e3 00 24   lwz     r31,36(r3)
c003df74: 73 e9 01 00   andi.   r9,r31,256
c003df78: 41 82 00 10   beq     c003df88
c003df7c: 38 60 00 00   li      r3,0
c003df80: 39 61 00 10   addi    r11,r1,16
c003df84: 4b fd 60 ac   b       c0014030 <_rest32gpr_30_x>
c003df88: 73 e9 00 80   andi.   r9,r31,128
c003df8c: 7c 7e 1b 78   mr      r30,r3
c003df90: 41 a2 00 14   beq     c003dfa4
c003df94: 38 c0 00 00   li      r6,0
c003df98: 38 a0 00 00   li      r5,0
c003df9c: 38 80 00 00   li      r4,0
c003dfa0: 48 05 c5 f1   bl      c009a590
c003dfa4: 73 e9 00 40   andi.   r9,r31,64
c003dfa8: 40 82 00 28   bne     c003dfd0
c003dfac: 73 ff 02 00   andi.   r31,r31,512
c003dfb0: 41 82 ff cc   beq     c003df7c
c003dfb4: 80 01 00 14   lwz     r0,20(r1)
c003dfb8: 83 e1 00 0c   lwz     r31,12(r1)
c003dfbc: 7f c3 f3 78   mr      r3,r30
c003dfc0: 83 c1 00 08   lwz     r30,8(r1)
c003dfc4: 7c 08 03 a6   mtlr    r0
c003dfc8: 38 21 00 10   addi    r1,r1,16
c003dfcc: 48 05 6f 6c   b       c0094f38
c003dfd0: 38 60 00 01   li      r3,1
c003dfd4: 4b ff ff ac   b       c003df80

However it is located in a hot path so inlining it is important. But forcing inlining of the entire function by using __always_inline leads to increasing the text size by approx 20 kbytes.
Instead, split the function in two parts, one part with the likely fast path, flagged __always_inline, and a second part out of line. With this change, on a powerpc32 with CONFIG_CC_OPTIMISE_FOR_SIZE vmlinux text increases by only 1.4 kbytes, which is partly compensated by a decrease of vmlinux data by 7 kbytes. On ppc64_defconfig which has CONFIG_CC_OPTIMISE_FOR_SPEED, this change reduces vmlinux text by more than 30 kbytes. Link: https://lkml.kernel.org/r/69ce0986a52d026d381d612801d978aa4f977460.1644563295.git.christophe.leroy@csgroup.eu Signed-off-by: Christophe Leroy Signed-off-by: Steven Rostedt (Google) --- include/linux/trace_events.h | 22 ++++++++++++---------- kernel/trace/trace_events_trigger.c | 14 ++++++++++++++ 2 files changed, 26 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 70c069aef02c..dcea51fb60e2 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -699,6 +699,8 @@ event_triggers_post_call(struct trace_event_file *file, bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); +bool __trace_trigger_soft_disabled(struct trace_event_file *file); + /** * trace_trigger_soft_disabled - do triggers and test if soft disabled * @file: The file pointer of the event to test @@ -708,20 +710,20 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); * triggers that require testing the fields, it will return true, * otherwise false. */ -static inline bool +static __always_inline bool trace_trigger_soft_disabled(struct trace_event_file *file) { unsigned long eflags = file->flags; - if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { - if (eflags & EVENT_FILE_FL_TRIGGER_MODE) - event_triggers_call(file, NULL, NULL, NULL); - if (eflags & EVENT_FILE_FL_SOFT_DISABLED) - return true; - if (eflags & EVENT_FILE_FL_PID_FILTER) - return trace_event_ignore_this_pid(file); - } - return false; + if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE | + EVENT_FILE_FL_SOFT_DISABLED | + EVENT_FILE_FL_PID_FILTER)))) + return false; + + if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND)) + return false; + + return __trace_trigger_soft_disabled(file); } #ifdef CONFIG_BPF_EVENTS diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index efe563140f27..7eb9d04f1c2e 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -84,6 +84,20 @@ event_triggers_call(struct trace_event_file *file, } EXPORT_SYMBOL_GPL(event_triggers_call); +bool __trace_trigger_soft_disabled(struct trace_event_file *file) +{ + unsigned long eflags = file->flags; + + if (eflags & EVENT_FILE_FL_TRIGGER_MODE) + event_triggers_call(file, NULL, NULL, NULL); + if (eflags & EVENT_FILE_FL_SOFT_DISABLED) + return true; + if (eflags & EVENT_FILE_FL_PID_FILTER) + return trace_event_ignore_this_pid(file); + return false; +} +EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled); + /** * event_triggers_post_call - Call 'post_triggers' for a trace event * @file: The trace_event_file associated with the event -- cgit v1.2.3-71-gd317 From 8786fde8421ce755a842051f9528674a1b1f0b9a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 22 Jan 2022 20:54:52 +0000 Subject: Convert NFS from readpages to readahead NFS is one of the last two users of the deprecated ->readpages aop. This conversion looks straightforward, but I have only compile-tested it.
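For reference, a minimal ->readahead implementation under the new API has the shape sketched below; example_queue_read() is a hypothetical filler, and, as in the NFS conversion, readahead_page() hands back each page with a reference held, so the loop drops it with put_page() once the read has been queued:

	static void example_readahead(struct readahead_control *ractl)
	{
		struct inode *inode = ractl->mapping->host;
		struct page *page;

		while ((page = readahead_page(ractl)) != NULL) {
			/* start an asynchronous read; I/O completion unlocks the page */
			example_queue_read(inode, page);
			put_page(page);
		}
	}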
Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Trond Myklebust --- fs/nfs/file.c | 2 +- fs/nfs/nfstrace.h | 6 +++--- fs/nfs/read.c | 21 +++++++++++++-------- include/linux/nfs_fs.h | 3 +-- 4 files changed, 18 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 76d76acbc594..4d681683d13c 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -514,7 +514,7 @@ static void nfs_swap_deactivate(struct file *file) const struct address_space_operations nfs_file_aops = { .readpage = nfs_readpage, - .readpages = nfs_readpages, + .readahead = nfs_readahead, .set_page_dirty = __set_page_dirty_nobuffers, .writepage = nfs_writepage, .writepages = nfs_writepages, diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 317ce27bdc4b..4611aa3a21a4 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -889,11 +889,11 @@ TRACE_EVENT(nfs_aop_readpage_done, TRACE_EVENT(nfs_aop_readahead, TP_PROTO( const struct inode *inode, - struct page *page, + loff_t pos, unsigned int nr_pages ), - TP_ARGS(inode, page, nr_pages), + TP_ARGS(inode, pos, nr_pages), TP_STRUCT__entry( __field(dev_t, dev) @@ -911,7 +911,7 @@ TRACE_EVENT(nfs_aop_readahead, __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(inode); - __entry->offset = page_index(page) << PAGE_SHIFT; + __entry->offset = pos; __entry->nr_pages = nr_pages; ), diff --git a/fs/nfs/read.c b/fs/nfs/read.c index eb00229c1a50..2472f962a9a2 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -290,9 +290,8 @@ static void nfs_readpage_result(struct rpc_task *task, } static int -readpage_async_filler(void *data, struct page *page) +readpage_async_filler(struct nfs_readdesc *desc, struct page *page) { - struct nfs_readdesc *desc = data; struct inode *inode = page_file_mapping(page)->host; unsigned int rsize = NFS_SERVER(inode)->rsize; struct nfs_page *new; @@ -397,14 +396,16 @@ out_unlock: return ret; } -int nfs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +void nfs_readahead(struct readahead_control *ractl) { + unsigned int nr_pages = readahead_count(ractl); + struct file *file = ractl->file; struct nfs_readdesc desc; - struct inode *inode = mapping->host; + struct inode *inode = ractl->mapping->host; + struct page *page; int ret; - trace_nfs_aop_readahead(inode, lru_to_page(pages), nr_pages); + trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages); nfs_inc_stats(inode, NFSIOS_VFSREADPAGES); ret = -ESTALE; @@ -422,14 +423,18 @@ int nfs_readpages(struct file *file, struct address_space *mapping, nfs_pageio_init_read(&desc.pgio, inode, false, &nfs_async_read_completion_ops); - ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); + while ((page = readahead_page(ractl)) != NULL) { + ret = readpage_async_filler(&desc, page); + put_page(page); + if (ret) + break; + } nfs_pageio_complete_read(&desc.pgio); put_nfs_open_context(desc.ctx); out: trace_nfs_aop_readahead_done(inode, nr_pages, ret); - return ret; } int __init nfs_init_readpagecache(void) diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 68f81d8d36de..333ea05e2531 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -601,8 +601,7 @@ nfs_have_writebacks(struct inode *inode) * linux/fs/nfs/read.c */ extern int nfs_readpage(struct file *, struct page *); -extern int nfs_readpages(struct file *, struct address_space *, - struct list_head *, unsigned); +void nfs_readahead(struct 
readahead_control *); /* * inline functions -- cgit v1.2.3-71-gd317 From 43245eca6e670ebf65908b549641c1460a9cc944 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Wed, 2 Feb 2022 17:55:02 -0500 Subject: NFSv4.1 support for NFS4_RESULT_PRESERVER_UNLINKED In 4.1+, the server is allowed to set a flag NFS4_RESULT_PRESERVE_UNLINKED in reply to the OPEN, that tells the client that it does not need to do a silly rename of an opened file when it's being removed. Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 10 ++++++++-- fs/nfs/nfs4proc.c | 2 ++ include/linux/nfs_fs.h | 1 + include/uapi/linux/nfs4.h | 1 + 4 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index fbb4a522d716..8b190c8e4a45 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1419,7 +1419,12 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) if (flags & LOOKUP_REVAL) goto out_force; out: - return (inode->i_nlink == 0) ? -ESTALE : 0; + if (inode->i_nlink > 0 || + (inode->i_nlink == 0 && + test_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(inode)->flags))) + return 0; + else + return -ESTALE; out_force: if (flags & LOOKUP_RCU) return -ECHILD; @@ -2330,7 +2335,8 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry) trace_nfs_unlink_enter(dir, dentry); spin_lock(&dentry->d_lock); - if (d_count(dentry) > 1) { + if (d_count(dentry) > 1 && !test_bit(NFS_INO_PRESERVE_UNLINKED, + &NFS_I(d_inode(dentry))->flags)) { spin_unlock(&dentry->d_lock); /* Start asynchronous writeout of the inode */ write_inode_now(d_inode(dentry), 0); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b3793b82a5e7..73a9b6de666c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3050,6 +3050,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); + if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) + set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); dentry = opendata->dentry; if (d_really_is_negative(dentry)) { diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 333ea05e2531..0e79dbbc759a 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -277,6 +277,7 @@ struct nfs4_copy_state { #define NFS_INO_STALE (1) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ +#define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ #define NFS_INO_FORCE_READDIR (7) /* force readdirplus */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index 800bb0ffa6e6..1d2043708bf1 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h @@ -45,6 +45,7 @@ #define NFS4_OPEN_RESULT_CONFIRM 0x0002 #define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 +#define NFS4_OPEN_RESULT_PRESERVE_UNLINKED 0x0008 #define NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK 0x0020 #define NFS4_SHARE_ACCESS_MASK 0x000F -- cgit v1.2.3-71-gd317 From 88a6099fc3274a27814d26dd688fdc5cd7a480ee Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 9 Feb 2022 13:22:48 -0500 Subject: NFS: Replace last uses of NFS_INO_REVAL_PAGECACHE Now that we have more fine grained attribute revalidation, let's 
just get rid of NFS_INO_REVAL_PAGECACHE. Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 24 +++++++++++------------- fs/nfs/write.c | 2 +- include/linux/nfs_fs.h | 8 +++----- 3 files changed, 15 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 8cf29c6cd9f9..3adf8b4a0079 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -236,19 +236,17 @@ static void nfs_zap_caches_locked(struct inode *inode) nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); nfsi->attrtimeo_timestamp = jiffies; - if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { - nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR - | NFS_INO_INVALID_DATA - | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL - | NFS_INO_INVALID_XATTR - | NFS_INO_REVAL_PAGECACHE); - } else - nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR - | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL - | NFS_INO_INVALID_XATTR - | NFS_INO_REVAL_PAGECACHE); + if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR | + NFS_INO_INVALID_DATA | + NFS_INO_INVALID_ACCESS | + NFS_INO_INVALID_ACL | + NFS_INO_INVALID_XATTR); + else + nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR | + NFS_INO_INVALID_ACCESS | + NFS_INO_INVALID_ACL | + NFS_INO_INVALID_XATTR); nfs_zap_label_cache_locked(nfsi); } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 987a187bd39a..f88b0eb9b18e 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -306,7 +306,7 @@ static void nfs_set_pageerror(struct address_space *mapping) /* Force file size revalidation */ spin_lock(&inode->i_lock); nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED | - NFS_INO_REVAL_PAGECACHE | + NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE); spin_unlock(&inode->i_lock); } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 0e79dbbc759a..ce3128e4bffa 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -356,11 +356,9 @@ static inline void nfs_mark_for_revalidate(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); - nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE - | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL - | NFS_INO_INVALID_CHANGE - | NFS_INO_INVALID_CTIME; + nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | + NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME | + NFS_INO_INVALID_SIZE; if (S_ISDIR(inode->i_mode)) nfsi->cache_validity |= NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); -- cgit v1.2.3-71-gd317 From 41e97b7f8a15d15da03ca15e6ff7b9b7ab7f588c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 9 Feb 2022 13:26:19 -0500 Subject: NFS: Remove unused flag NFS_INO_REVAL_PAGECACHE Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 5 ++--- fs/nfs/nfstrace.h | 1 - include/linux/nfs_fs.h | 1 - 3 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 3adf8b4a0079..f9fc506ebb29 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -203,14 +203,13 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) NFS_INO_INVALID_OTHER | NFS_INO_INVALID_XATTR); flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE); - } else if (flags & NFS_INO_REVAL_PAGECACHE) - flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE; + } if (!nfs_has_xattr_cache(nfsi)) flags &= ~NFS_INO_INVALID_XATTR; if (flags & NFS_INO_INVALID_DATA) nfs_fscache_invalidate(inode, 0); - flags &= ~(NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED); + flags &= ~NFS_INO_REVAL_FORCED; 
nfsi->cache_validity |= flags; diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 4611aa3a21a4..45a310b586ce 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -21,7 +21,6 @@ { NFS_INO_INVALID_ATIME, "INVALID_ATIME" }, \ { NFS_INO_INVALID_ACCESS, "INVALID_ACCESS" }, \ { NFS_INO_INVALID_ACL, "INVALID_ACL" }, \ - { NFS_INO_REVAL_PAGECACHE, "REVAL_PAGECACHE" }, \ { NFS_INO_REVAL_FORCED, "REVAL_FORCED" }, \ { NFS_INO_INVALID_LABEL, "INVALID_LABEL" }, \ { NFS_INO_INVALID_CHANGE, "INVALID_CHANGE" }, \ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index ce3128e4bffa..72a732a5103c 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -247,7 +247,6 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */ #define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */ #define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */ -#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */ #define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */ #define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */ #define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */ -- cgit v1.2.3-71-gd317 From 891b7023010cc0d62d814619b1893745d7613f7f Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Sat, 5 Feb 2022 11:36:09 +0100 Subject: clk: mux: Declare u32 *table parameter as const MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The elements of the table are never modified in clk-mux.c. To make this clear to clock drivers, declare the parameter as const u32 *table. Signed-off-by: Jonathan Neuschäfer Link: https://lore.kernel.org/r/20220205103613.1216218-4-j.neuschaefer@gmx.net Signed-off-by: Stephen Boyd --- drivers/clk/clk-mux.c | 10 +++++----- include/linux/clk-provider.h | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index 20582aae7a35..214045f6e989 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -40,7 +40,7 @@ static inline void clk_mux_writel(struct clk_mux *mux, u32 val) writel(val, mux->reg); } -int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, +int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags, unsigned int val) { int num_parents = clk_hw_get_num_parents(hw); @@ -67,7 +67,7 @@ int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, } EXPORT_SYMBOL_GPL(clk_mux_val_to_index); -unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index) +unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index) { unsigned int val = index; @@ -152,7 +152,7 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np, const struct clk_hw **parent_hws, const struct clk_parent_data *parent_data, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock) + u8 clk_mux_flags, const u32 *table, spinlock_t *lock) { struct clk_mux *mux; struct clk_hw *hw; @@ -218,7 +218,7 @@ struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node const struct clk_hw **parent_hws, const struct clk_parent_data *parent_data, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock) + u8 clk_mux_flags, const u32 *table, spinlock_t *lock) { struct clk_hw **ptr, *hw; @@ -244,7 +244,7 
@@ EXPORT_SYMBOL_GPL(__devm_clk_hw_register_mux); struct clk *clk_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock) + u8 clk_mux_flags, const u32 *table, spinlock_t *lock) { struct clk_hw *hw; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 2faa6f7aa8a8..27be57528874 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -888,7 +888,7 @@ void clk_hw_unregister_divider(struct clk_hw *hw); struct clk_mux { struct clk_hw hw; void __iomem *reg; - u32 *table; + const u32 *table; u32 mask; u8 shift; u8 flags; @@ -913,18 +913,18 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np, const char *name, u8 num_parents, const char * const *parent_names, const struct clk_hw **parent_hws, const struct clk_parent_data *parent_data, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock); + u8 clk_mux_flags, const u32 *table, spinlock_t *lock); struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np, const char *name, u8 num_parents, const char * const *parent_names, const struct clk_hw **parent_hws, const struct clk_parent_data *parent_data, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock); + u8 clk_mux_flags, const u32 *table, spinlock_t *lock); struct clk *clk_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock); + u8 clk_mux_flags, const u32 *table, spinlock_t *lock); #define clk_register_mux(dev, name, parent_names, num_parents, flags, reg, \ shift, width, clk_mux_flags, lock) \ @@ -962,9 +962,9 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name, (shift), BIT((width)) - 1, (clk_mux_flags), \ NULL, (lock)) -int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, +int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags, unsigned int val); -unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index); +unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index); void clk_unregister_mux(struct clk *clk); void clk_hw_unregister_mux(struct clk_hw *hw); -- cgit v1.2.3-71-gd317 From f7f497cb702462e8505ff3d8d4e7722ad95626a1 Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Wed, 16 Feb 2022 12:30:35 +0530 Subject: jbd2: kill t_handle_lock transaction spinlock This patch kills the t_handle_lock transaction spinlock completely from jbd2. To explain the reasoning, currently there were three sites at which this spinlock was used. 1. jbd2_journal_wait_updates() a. Based on careful code review it can be seen that we don't need this lock here. This is since we wait for any currently ongoing updates based on an atomic variable t_updates. And we anyway don't take any t_handle_lock while in stop_this_handle(), i.e.

write_lock(&journal->j_state_lock()
jbd2_journal_wait_updates()             stop_this_handle()
while (atomic_read(txn->t_updates) {    |
        DEFINE_WAIT(wait);              |
        prepare_to_wait();              |
        if (atomic_read(txn->t_updates)     if (atomic_dec_and_test(txn->t_updates))
                write_unlock(&journal->j_state_lock);
                schedule();                     wake_up()
                write_lock(&journal->j_state_lock);
        finish_wait();
}
txn->t_state = T_COMMIT
write_unlock(&journal->j_state_lock);

b.
Also note that between atomic_inc(&txn->t_updates) in start_this_handle() and jbd2_journal_wait_updates(), the synchronization happens via read_lock(journal->j_state_lock) in start_this_handle(); 2. jbd2_journal_extend() a. jbd2_journal_extend() is called with the handle of each process from task_struct. So no lock is required in updating member fields of handle_t. b. For member fields of h_transaction, all updates happen only via atomic APIs (which is also within read_lock()). So, no need of this transaction spinlock. 3. update_t_max_wait() Based on Jan's suggestion, this can be carefully removed using the atomic cmpxchg API. Note that there can be several processes which are waiting for a new transaction to be allocated and started. For doing this only one process will succeed in taking write_lock() and allocating a new txn. After that all of the processes will be updating the t_max_wait (max transaction wait time). This can be done via the below method w/o taking any locks using atomic cmpxchg. For more details refer to [1]:

new = get_new_val();
old = READ_ONCE(ptr->max_val);
while (old < new)
        old = cmpxchg(&ptr->max_val, old, new);

[1]: https://lwn.net/Articles/849237/ Suggested-by: Jan Kara Signed-off-by: Ritesh Harjani Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/d89e599658b4a1f3893a48c6feded200073037fc.1644992076.git.riteshh@linux.ibm.com Signed-off-by: Theodore Ts'o --- fs/jbd2/transaction.c | 28 +++++++++------------------- include/linux/jbd2.h | 3 --- 2 files changed, 9 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 259e00046a8b..83801a8be078 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -107,7 +107,6 @@ static void jbd2_get_transaction(journal_t *journal, transaction->t_start_time = ktime_get(); transaction->t_tid = journal->j_transaction_sequence++; transaction->t_expires = jiffies + journal->j_commit_interval; - spin_lock_init(&transaction->t_handle_lock); atomic_set(&transaction->t_updates, 0); atomic_set(&transaction->t_outstanding_credits, jbd2_descriptor_blocks_per_trans(journal) + @@ -139,24 +138,21 @@ static void jbd2_get_transaction(journal_t *journal, /* * Update transaction's maximum wait time, if debugging is enabled. * - * In order for t_max_wait to be reliable, it must be protected by a - * lock. But doing so will mean that start_this_handle() can not be - * run in parallel on SMP systems, which limits our scalability. So - * unless debugging is enabled, we no longer update t_max_wait, which - * means that maximum wait time reported by the jbd2_run_stats - * tracepoint will always be zero. + * t_max_wait is carefully updated here with use of atomic compare exchange. + * Note that there could be multiple threads trying to do this simultaneously + * hence using cmpxchg to avoid any use of locks in this case.
*/ static inline void update_t_max_wait(transaction_t *transaction, unsigned long ts) { #ifdef CONFIG_JBD2_DEBUG + unsigned long oldts, newts; if (jbd2_journal_enable_debug && time_after(transaction->t_start, ts)) { - ts = jbd2_time_diff(ts, transaction->t_start); - spin_lock(&transaction->t_handle_lock); - if (ts > transaction->t_max_wait) - transaction->t_max_wait = ts; - spin_unlock(&transaction->t_handle_lock); + newts = jbd2_time_diff(ts, transaction->t_start); + oldts = READ_ONCE(transaction->t_max_wait); + while (oldts < newts) + oldts = cmpxchg(&transaction->t_max_wait, oldts, newts); } #endif } @@ -690,7 +686,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records) DIV_ROUND_UP( handle->h_revoke_credits_requested, journal->j_revoke_records_per_block); - spin_lock(&transaction->t_handle_lock); wanted = atomic_add_return(nblocks, &transaction->t_outstanding_credits); @@ -698,7 +693,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records) jbd_debug(3, "denied handle %p %d blocks: " "transaction too large\n", handle, nblocks); atomic_sub(nblocks, &transaction->t_outstanding_credits); - goto unlock; + goto error_out; } trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev, @@ -714,8 +709,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records) result = 0; jbd_debug(3, "extended handle %p by %d\n", handle, nblocks); -unlock: - spin_unlock(&transaction->t_handle_lock); error_out: read_unlock(&journal->j_state_lock); return result; @@ -860,15 +853,12 @@ void jbd2_journal_wait_updates(journal_t *journal) if (!transaction) break; - spin_lock(&transaction->t_handle_lock); prepare_to_wait(&journal->j_wait_updates, &wait, TASK_UNINTERRUPTIBLE); if (!atomic_read(&transaction->t_updates)) { - spin_unlock(&transaction->t_handle_lock); finish_wait(&journal->j_wait_updates, &wait); break; } - spin_unlock(&transaction->t_handle_lock); write_unlock(&journal->j_state_lock); schedule(); finish_wait(&journal->j_wait_updates, &wait); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 9c3ada74ffb1..a787872e1e86 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -554,9 +554,6 @@ struct transaction_chp_stats_s { * ->j_list_lock * * j_state_lock - * ->t_handle_lock - * - * j_state_lock * ->j_list_lock (journal_unmap_buffer) * */ -- cgit v1.2.3-71-gd317 From 5e187189ec324f78035d33a4bc123a9c4ca6f3e3 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sat, 26 Feb 2022 12:18:29 +0800 Subject: net: ip: add skb drop reasons for ip egress path Replace kfree_skb() which is used in the packet egress path of IP layer with kfree_skb_reason(). Functions that are involved include: __ip_queue_xmit() ip_finish_output() ip_mc_finish_output() ip6_output() ip6_finish_output() ip6_finish_output2() Following new drop reasons are introduced: SKB_DROP_REASON_IP_OUTNOROUTES SKB_DROP_REASON_BPF_CGROUP_EGRESS SKB_DROP_REASON_IPV6DISABLED SKB_DROP_REASON_NEIGH_CREATEFAIL Reviewed-by: Mengen Sun Reviewed-by: Hao Peng Signed-off-by: Menglong Dong Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 9 +++++++++ include/trace/events/skb.h | 5 +++++ net/ipv4/ip_output.c | 8 ++++---- net/ipv6/ip6_output.c | 6 +++--- 4 files changed, 21 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 31be38078918..62b4bed1b7bc 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -380,6 +380,15 @@ enum skb_drop_reason { * the ofo queue, corresponding to * LINUX_MIB_TCPOFOMERGE */ + SKB_DROP_REASON_IP_OUTNOROUTES, /* route lookup failed */ + SKB_DROP_REASON_BPF_CGROUP_EGRESS, /* dropped by + * BPF_PROG_TYPE_CGROUP_SKB + * eBPF program + */ + SKB_DROP_REASON_IPV6DISABLED, /* IPv6 is disabled on the device */ + SKB_DROP_REASON_NEIGH_CREATEFAIL, /* failed to create neigh + * entry + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 2ab7193313aa..97f8097651da 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -37,6 +37,11 @@ EM(SKB_DROP_REASON_TCP_OLD_DATA, TCP_OLD_DATA) \ EM(SKB_DROP_REASON_TCP_OVERWINDOW, TCP_OVERWINDOW) \ EM(SKB_DROP_REASON_TCP_OFOMERGE, TCP_OFOMERGE) \ + EM(SKB_DROP_REASON_IP_OUTNOROUTES, IP_OUTNOROUTES) \ + EM(SKB_DROP_REASON_BPF_CGROUP_EGRESS, \ + BPF_CGROUP_EGRESS) \ + EM(SKB_DROP_REASON_IPV6DISABLED, IPV6DISABLED) \ + EM(SKB_DROP_REASON_NEIGH_CREATEFAIL, NEIGH_CREATEFAIL) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 2941de5da871..6df3545c891d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -233,7 +233,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s net_dbg_ratelimited("%s: No header cache and no neighbour!\n", __func__); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL); return -EINVAL; } @@ -317,7 +317,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk case NET_XMIT_CN: return __ip_finish_output(net, sk, skb) ? : ret; default: - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS); return ret; } } @@ -337,7 +337,7 @@ static int ip_mc_finish_output(struct net *net, struct sock *sk, case NET_XMIT_SUCCESS: break; default: - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS); return ret; } @@ -536,7 +536,7 @@ packet_routed: no_route: rcu_read_unlock(); IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES); return -EHOSTUNREACH; } EXPORT_SYMBOL(__ip_queue_xmit); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index c5c10f798025..c5edc86b18bd 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -130,7 +130,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * rcu_read_unlock_bh(); IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL); return -EINVAL; } @@ -202,7 +202,7 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s case NET_XMIT_CN: return __ip6_finish_output(net, sk, skb) ? 
: ret; default: - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS); return ret; } } @@ -217,7 +217,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) if (unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED); return 0; } -- cgit v1.2.3-71-gd317 From a5736edda10ca2ba075606baad1dda3f2426766d Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Sat, 26 Feb 2022 12:18:30 +0800 Subject: net: neigh: use kfree_skb_reason() for __neigh_event_send() Replace kfree_skb() used in __neigh_event_send() with kfree_skb_reason(). Following drop reasons are added: SKB_DROP_REASON_NEIGH_FAILED SKB_DROP_REASON_NEIGH_QUEUEFULL SKB_DROP_REASON_NEIGH_DEAD The first two reasons above should be the hot path that skb drops in neighbour subsystem. Reviewed-by: Mengen Sun Reviewed-by: Hao Peng Signed-off-by: Menglong Dong Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/skbuff.h | 5 +++++ include/trace/events/skb.h | 3 +++ net/core/neighbour.c | 6 +++--- 3 files changed, 11 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 62b4bed1b7bc..d67941f78b92 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -389,6 +389,11 @@ enum skb_drop_reason { SKB_DROP_REASON_NEIGH_CREATEFAIL, /* failed to create neigh * entry */ + SKB_DROP_REASON_NEIGH_FAILED, /* neigh entry in failed state */ + SKB_DROP_REASON_NEIGH_QUEUEFULL, /* arp_queue for neigh + * entry is full + */ + SKB_DROP_REASON_NEIGH_DEAD, /* neigh entry is dead */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 97f8097651da..1977f301260d 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -42,6 +42,9 @@ BPF_CGROUP_EGRESS) \ EM(SKB_DROP_REASON_IPV6DISABLED, IPV6DISABLED) \ EM(SKB_DROP_REASON_NEIGH_CREATEFAIL, NEIGH_CREATEFAIL) \ + EM(SKB_DROP_REASON_NEIGH_FAILED, NEIGH_FAILED) \ + EM(SKB_DROP_REASON_NEIGH_QUEUEFULL, NEIGH_QUEUEFULL) \ + EM(SKB_DROP_REASON_NEIGH_DEAD, NEIGH_DEAD) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ec0bf737b076..f64ebd050f6c 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1171,7 +1171,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb, neigh->updated = jiffies; write_unlock_bh(&neigh->lock); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED); return 1; } } else if (neigh->nud_state & NUD_STALE) { @@ -1193,7 +1193,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb, if (!buff) break; neigh->arp_queue_len_bytes -= buff->truesize; - kfree_skb(buff); + kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL); NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); } skb_dst_force(skb); @@ -1215,7 +1215,7 @@ out_dead: if (neigh->nud_state & NUD_STALE) goto out_unlock_bh; write_unlock_bh(&neigh->lock); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD); trace_neigh_event_send_dead(neigh, 1); return 1; } -- cgit v1.2.3-71-gd317 From 21ca9fb62d4688da41825e0f05d8e7e26afc69d6 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 24 Feb 2022 16:20:10 +0200 Subject: PCI/IOV: Add pci_iov_vf_id() to get VF index The PCI core uses the VF index internally, often called the vf_id, during the setup of the VF, eg pci_iov_add_virtfn(). 
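To make the arithmetic concrete (hypothetical numbers, not taken from the patch): with a PF at routing ID 0x0008 (bus 0, devfn 0x08, i.e. device 1 function 0), First VF Offset 1 and VF Stride 1, VF index 3 is placed at routing ID 0x0008 + 1 + 3 * 1 = 0x000c. The helper added here simply inverts that mapping:

	/* sketch with the example values above */
	u16 pf_rid = (pf_bus << 8) | pf_devfn;              /* 0x0008 */
	u16 vf_rid = (vf_bus << 8) | vf_devfn;              /* 0x000c */
	int vf_id  = (vf_rid - (pf_rid + offset)) / stride; /* (0xc - 0x9) / 1 = 3 */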
This index is needed for device drivers that implement live migration for their internal operations that configure/control their VFs. Specifically, mlx5_vfio_pci driver that is introduced in coming patches from this series needs it and not the bus/device/function which is exposed today. Add pci_iov_vf_id() which computes the vf_id by reversing the math that was used to create the bus/device/function. Link: https://lore.kernel.org/all/20220224142024.147653-2-yishaih@nvidia.com Signed-off-by: Jason Gunthorpe Acked-by: Bjorn Helgaas Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/pci/iov.c | 14 ++++++++++++++ include/linux/pci.h | 8 +++++++- 2 files changed, 21 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 0267977c9f17..2e9f3d70803a 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -33,6 +33,20 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id) } EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn); +int pci_iov_vf_id(struct pci_dev *dev) +{ + struct pci_dev *pf; + + if (!dev->is_virtfn) + return -EINVAL; + + pf = pci_physfn(dev); + return (((dev->bus->number << 8) + dev->devfn) - + ((pf->bus->number << 8) + pf->devfn + pf->sriov->offset)) / + pf->sriov->stride; +} +EXPORT_SYMBOL_GPL(pci_iov_vf_id); + /* * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may * change when NumVFs changes. diff --git a/include/linux/pci.h b/include/linux/pci.h index 8253a5413d7c..3d4ff7b35ad1 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2166,7 +2166,7 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar); #ifdef CONFIG_PCI_IOV int pci_iov_virtfn_bus(struct pci_dev *dev, int id); int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); - +int pci_iov_vf_id(struct pci_dev *dev); int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); void pci_disable_sriov(struct pci_dev *dev); @@ -2194,6 +2194,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) { return -ENOSYS; } + +static inline int pci_iov_vf_id(struct pci_dev *dev) +{ + return -ENOSYS; +} + static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { return -ENODEV; } -- cgit v1.2.3-71-gd317 From a7e9f240c0da4fb73a353c603daf4beba04c6ecf Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 24 Feb 2022 16:20:13 +0200 Subject: PCI/IOV: Add pci_iov_get_pf_drvdata() to allow VF reaching the drvdata of a PF There are some cases where a SR-IOV VF driver will need to reach into and interact with the PF driver. This requires accessing the drvdata of the PF. Provide a function pci_iov_get_pf_drvdata() to return this PF drvdata in a safe way. Normally accessing a drvdata of a foreign struct device would be done using the device_lock() to protect against device driver probe()/remove() races. However, due to the design of pci_enable_sriov() this will result in a ABBA deadlock on the device_lock as the PF's device_lock is held during PF sriov_configure() while calling pci_enable_sriov() which in turn holds the VF's device_lock while calling VF probe(), and similarly for remove. This means the VF driver can never obtain the PF's device_lock. Instead use the implicit locking created by pci_enable/disable_sriov(). A VF driver can access its PF drvdata only while its own driver is attached, and the PF driver can control access to its own drvdata based on when it calls pci_enable/disable_sriov(). To use this API the PF driver will setup the PF drvdata in the probe() function. 
pci_enable_sriov() is only called from sriov_configure(), which cannot happen until probe() completes, ensuring no VF races with drvdata setup. For removal, the PF driver must call pci_disable_sriov() in its remove function before destroying any of the drvdata. This ensures that all VF drivers are unbound before returning, fencing concurrent access to the drvdata. The introduction of a new function to do this access makes the special locking scheme clear and documents the requirements on the PF/VF drivers using this. Link: https://lore.kernel.org/all/20220224142024.147653-5-yishaih@nvidia.com Signed-off-by: Jason Gunthorpe Acked-by: Bjorn Helgaas Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/pci/iov.c | 29 +++++++++++++++++++++++++++++ include/linux/pci.h | 7 +++++++ 2 files changed, 36 insertions(+) (limited to 'include/linux') diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 2e9f3d70803a..28ec952e1221 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -47,6 +47,35 @@ int pci_iov_vf_id(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_iov_vf_id); +/** + * pci_iov_get_pf_drvdata - Return the drvdata of a PF + * @dev - VF pci_dev + * @pf_driver - Device driver required to own the PF + * + * This must be called from a context that ensures that a VF driver is attached. + * The value returned is invalid once the VF driver completes its remove() + * callback. + * + * Locking is achieved by the driver core. A VF driver cannot be probed until + * pci_enable_sriov() is called and pci_disable_sriov() does not return until + * all VF drivers have completed their remove(). + * + * The PF driver must call pci_disable_sriov() before it begins to destroy the + * drvdata. + */ +void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver) +{ + struct pci_dev *pf_dev; + + if (!dev->is_virtfn) + return ERR_PTR(-EINVAL); + pf_dev = dev->physfn; + if (pf_dev->driver != pf_driver) + return ERR_PTR(-EINVAL); + return pci_get_drvdata(pf_dev); +} +EXPORT_SYMBOL_GPL(pci_iov_get_pf_drvdata); + /* * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may * change when NumVFs changes. diff --git a/include/linux/pci.h b/include/linux/pci.h index 3d4ff7b35ad1..60d423d8f0c4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2167,6 +2167,7 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar); int pci_iov_virtfn_bus(struct pci_dev *dev, int id); int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); int pci_iov_vf_id(struct pci_dev *dev); +void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver); int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); void pci_disable_sriov(struct pci_dev *dev); @@ -2200,6 +2201,12 @@ static inline int pci_iov_vf_id(struct pci_dev *dev) return -ENOSYS; } +static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev, + struct pci_driver *pf_driver) +{ + return ERR_PTR(-EINVAL); +} + static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { return -ENODEV; } -- cgit v1.2.3-71-gd317 From 1695b97b291e79295bf5c26cba5ecc4b443d8ac7 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Thu, 24 Feb 2022 16:20:14 +0200 Subject: net/mlx5: Expose APIs to get/put the mlx5 core device Expose an API to get the mlx5 core device from a given VF PCI device if mlx5_core is its driver.
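A rough sketch of the calling pattern this enables on the VF side; foo_vf_do_work() and do_migration_command() are placeholders, not real functions:

#include <linux/errno.h>
#include <linux/mlx5/driver.h>

/* Placeholder for whatever the VF driver needs the PF's services for. */
static int do_migration_command(struct mlx5_core_dev *mdev,
				struct pci_dev *vf_pdev);

static int foo_vf_do_work(struct pci_dev *vf_pdev)
{
	struct mlx5_core_dev *mdev;
	int err;

	/* NULL if the PF is not bound to mlx5_core or is not up */
	mdev = mlx5_vf_get_core_dev(vf_pdev);
	if (!mdev)
		return -ENOTCONN;

	/* intf_state_mutex is held here, so keep this window narrow */
	err = do_migration_command(mdev, vf_pdev);

	mlx5_vf_put_core_dev(mdev);
	return err;
}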
The get API keeps the intf_state_mutex locked to make sure that the device can't go away or be unloaded until the caller completes its job with the device; any flow that takes the lock is expected to hold it only for a short period of time. The put API unlocks the intf_state_mutex. The use case for these APIs is the migration flow of a VF over VFIO PCI. In that case the VF doesn't ride on mlx5_core, because the flow spans *two* different PCI devices: the PF owned by mlx5_core and the VF owned by the vfio driver. The mlx5_core of the PF is accessed only during the narrow window of the VF's ioctl that requires its services. This allows the PF driver to be more independent of the VF driver, so long as it doesn't reset the FW. Link: https://lore.kernel.org/all/20220224142024.147653-6-yishaih@nvidia.com Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 44 ++++++++++++++++++++++++++ include/linux/mlx5/driver.h | 3 ++ 2 files changed, 47 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 5b8958186157..e9aeba4267ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1881,6 +1881,50 @@ static struct pci_driver mlx5_core_driver = { .sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count, }; +/** + * mlx5_vf_get_core_dev - Get the mlx5 core device from a given VF PCI device if + * mlx5_core is its driver. + * @pdev: The associated PCI device. + * + * Upon return the interface state lock stays held to let the caller use it safely. + * The caller must use the returned mlx5 device only for a narrow window + * and put it back with mlx5_vf_put_core_dev() immediately once usage is over. + * + * Return: Pointer to the associated mlx5_core_dev or NULL. + */ +struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev) + __acquires(&mdev->intf_state_mutex) +{ + struct mlx5_core_dev *mdev; + + mdev = pci_iov_get_pf_drvdata(pdev, &mlx5_core_driver); + if (IS_ERR(mdev)) + return NULL; + + mutex_lock(&mdev->intf_state_mutex); + if (!test_bit(MLX5_INTERFACE_STATE_UP, &mdev->intf_state)) { + mutex_unlock(&mdev->intf_state_mutex); + return NULL; + } + + return mdev; +} +EXPORT_SYMBOL(mlx5_vf_get_core_dev); + +/** + * mlx5_vf_put_core_dev - Put the mlx5 core device back. + * @mdev: The mlx5 core device. + * + * Upon return the interface state lock is unlocked and the caller should not + * access the mdev any more.
+ */ +void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev) + __releases(&mdev->intf_state_mutex) +{ + mutex_unlock(&mdev->intf_state_mutex); +} +EXPORT_SYMBOL(mlx5_vf_put_core_dev); + static void mlx5_core_verify_params(void) { if (prof_sel >= ARRAY_SIZE(profile)) { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 78655d8d13a7..319322a8ff94 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1143,6 +1143,9 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, u64 length, u16 uid, phys_addr_t addr, u32 obj_id); +struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev); +void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev); + #ifdef CONFIG_MLX5_CORE_IPOIB struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, struct ib_device *ibdev, -- cgit v1.2.3-71-gd317 From adfdaff3d14fe819a0420d81788f7ebbcd954940 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Thu, 24 Feb 2022 16:20:15 +0200 Subject: net/mlx5: Introduce migration bits and structures Introduce migration IFC related stuff to enable migration commands. Link: https://lore.kernel.org/all/20220224142024.147653-7-yishaih@nvidia.com Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- include/linux/mlx5/mlx5_ifc.h | 147 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 146 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 598ac3bcc901..b1c27409c997 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -127,6 +127,11 @@ enum { MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111, MLX5_CMD_OP_ALLOC_SF = 0x113, MLX5_CMD_OP_DEALLOC_SF = 0x114, + MLX5_CMD_OP_SUSPEND_VHCA = 0x115, + MLX5_CMD_OP_RESUME_VHCA = 0x116, + MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE = 0x117, + MLX5_CMD_OP_SAVE_VHCA_STATE = 0x118, + MLX5_CMD_OP_LOAD_VHCA_STATE = 0x119, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, @@ -1757,7 +1762,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_682[0x1]; u8 log_max_sf[0x5]; u8 apu[0x1]; - u8 reserved_at_689[0x7]; + u8 reserved_at_689[0x4]; + u8 migration[0x1]; + u8 reserved_at_68e[0x2]; u8 log_min_sf_size[0x8]; u8 max_num_sf_partitions[0x8]; @@ -11519,4 +11526,142 @@ enum { MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE, }; +enum { + MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR = 0x0, + MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER = 0x1, +}; + +struct mlx5_ifc_suspend_vhca_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_suspend_vhca_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER = 0x0, + MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR = 0x1, +}; + +struct mlx5_ifc_resume_vhca_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_resume_vhca_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_query_vhca_migration_state_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; 
+ u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_vhca_migration_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 required_umem_size[0x20]; + + u8 reserved_at_a0[0x160]; +}; + +struct mlx5_ifc_save_vhca_state_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; + + u8 va[0x40]; + + u8 mkey[0x20]; + + u8 size[0x20]; +}; + +struct mlx5_ifc_save_vhca_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 actual_image_size[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_load_vhca_state_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; + + u8 va[0x40]; + + u8 mkey[0x20]; + + u8 size[0x20]; +}; + +struct mlx5_ifc_load_vhca_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + #endif /* MLX5_IFC_H */ -- cgit v1.2.3-71-gd317 From e8eb9e32999dc5995b19d5141f8ebb38f69696fc Mon Sep 17 00:00:00 2001 From: Dimitris Michailidis Date: Thu, 24 Feb 2022 18:58:55 -0800 Subject: PCI: Add Fungible Vendor ID to pci_ids.h Cc: Bjorn Helgaas Cc: linux-pci@vger.kernel.org Signed-off-by: Dimitris Michailidis Acked-by: Bjorn Helgaas Signed-off-by: David S. Miller --- include/linux/pci_ids.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index aad54c666407..c7e6f2043c7d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2561,6 +2561,8 @@ #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_VENDOR_ID_FUNGIBLE 0x1dad + #define PCI_VENDOR_ID_HXT 0x1dbf #define PCI_VENDOR_ID_TEKRAM 0x1de1 -- cgit v1.2.3-71-gd317 From 91495f21fcec3a889d6a9c21e6df16c4c15f2184 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 25 Feb 2022 11:22:16 +0200 Subject: net: dsa: tag_8021q: replace the SVL bridging with VLAN-unaware IVL bridging For VLAN-unaware bridging, tag_8021q uses something perhaps a bit too tied to the sja1105 switch: each port uses the same pvid which is also used for standalone operation (a unique one from which the source port and switch ID can be retrieved when packets from that port are forwarded to the CPU). Since each port has a unique pvid when performing autonomous forwarding, the switch must be configured for Shared VLAN Learning (SVL) such that the VLAN ID itself is ignored when performing FDB lookups. Without SVL, packets would always be flooded, since FDB lookup in the source port's VLAN would never find any entry. First of all, to make tag_8021q more palatable to switches which might not support Shared VLAN Learning, let's just use a common VLAN for all ports that are under the same bridge. Secondly, using Shared VLAN Learning means that FDB isolation can never be enforced. But if all ports under the same VLAN-unaware bridge share the same VLAN ID, it can. The disadvantage is that the CPU port can no longer perform precise source port identification for these packets. But at least we have a mechanism which has proven to be adequate for that situation: imprecise RX (dsa_find_designated_bridge_port_by_vid), which is what we use for termination on VLAN-aware bridges.
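The imprecise-RX idea for VLAN-unaware bridges, condensed from the dsa_tag_8021q_find_port_by_vbid() helper that a later patch in this series adds (the real helper also filters on the port's STP state): any user port under the bridge encoded in the VBID can stand in for the unknown source port.

/* Condensed sketch; types and iterators are DSA-core internals. */
static struct net_device *find_port_by_vbid(struct dsa_switch_tree *dst,
					    struct dsa_port *cpu_dp, int vbid)
{
	struct dsa_port *dp;

	dsa_tree_for_each_user_port(dp, dst) {
		/* Only bridged ports reached through this CPU port qualify */
		if (!dp->bridge || dp->cpu_dp != cpu_dp)
			continue;

		if (dsa_port_bridge_num_get(dp) == vbid)
			return dp->slave;
	}

	return NULL;
}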
The VLAN ID that VLAN-unaware bridges will use with tag_8021q is the same one as we were previously using for imprecise TX (bridge TX forwarding offload). It is already allocated, it is just a matter of using it. Note that because now all ports under the same bridge share the same VLAN, the complexity of performing a tag_8021q bridge join decreases dramatically. We no longer have to install the RX VLAN of a newly joining port into the port membership of the existing bridge ports. The newly joining port just becomes a member of the VLAN corresponding to that bridge, and the other ports are already members of it from when they joined the bridge themselves. So forwarding works properly. This means that we can unhook dsa_tag_8021q_bridge_{join,leave} from the cross-chip notifier level dsa_switch_bridge_{join,leave}. We can put these calls directly into the sja1105 driver. With this new mode of operation, a port controlled by tag_8021q can have two pvids whereas before it could only have one. The pvid for standalone operation is different from the pvid used for VLAN-unaware bridging. This is done, again, so that FDB isolation can be enforced. Let tag_8021q manage this by deleting the standalone pvid when a port joins a bridge, and restoring it when it leaves it. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/sja1105/sja1105_main.c | 4 +- drivers/net/dsa/sja1105/sja1105_vl.c | 15 +++- include/linux/dsa/8021q.h | 12 +-- net/dsa/dsa_priv.h | 4 - net/dsa/switch.c | 4 +- net/dsa/tag_8021q.c | 132 ++++++++++++--------------------- 6 files changed, 70 insertions(+), 101 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 8fc309446e1e..ab196dc615da 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -2067,7 +2067,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port, if (rc) return rc; - rc = dsa_tag_8021q_bridge_tx_fwd_offload(ds, port, bridge); + rc = dsa_tag_8021q_bridge_join(ds, port, bridge); if (rc) { sja1105_bridge_member(ds, port, bridge, false); return rc; @@ -2081,7 +2081,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port, static void sja1105_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { - dsa_tag_8021q_bridge_tx_fwd_unoffload(ds, port, bridge); + dsa_tag_8021q_bridge_leave(ds, port, bridge); sja1105_bridge_member(ds, port, bridge, false); } diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index f5dca6a9b0f9..14e6dd7fb103 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -296,6 +296,19 @@ static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a, return false; } +/* FIXME: this should change when the bridge upper of the port changes. 
*/ +static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp) +{ + unsigned long bridge_num; + + if (!dp->bridge) + return dsa_tag_8021q_rx_vid(dp); + + bridge_num = dsa_port_bridge_num_get(dp); + + return dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); +} + static int sja1105_init_virtual_links(struct sja1105_private *priv, struct netlink_ext_ack *extack) { @@ -395,7 +408,7 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv, vl_lookup[k].vlanprior = rule->key.vl.pcp; } else { struct dsa_port *dp = dsa_to_port(priv->ds, port); - u16 vid = dsa_tag_8021q_rx_vid(dp); + u16 vid = sja1105_port_get_tag_8021q_vid(dp); vl_lookup[k].vlanid = vid; vl_lookup[k].vlanprior = 0; diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 939a1beaddf7..f47f227baa27 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -32,17 +32,17 @@ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto); void dsa_tag_8021q_unregister(struct dsa_switch *ds); +int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge); + +void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge); + struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id); -int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, - struct dsa_bridge bridge); - -void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, - struct dsa_bridge bridge); - u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num); u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 1ba93afdc874..7a1c98581f53 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -522,10 +522,6 @@ struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst, const struct net_device *br); /* tag_8021q.c */ -int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, - struct dsa_notifier_bridge_info *info); -int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, - struct dsa_notifier_bridge_info *info); int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds, struct dsa_notifier_tag_8021q_vlan_info *info); int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds, diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 0c2961cbc105..eb38beb10147 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -110,7 +110,7 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds, return err; } - return dsa_tag_8021q_bridge_join(ds, info); + return 0; } static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds, @@ -186,7 +186,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, return err; } - return dsa_tag_8021q_bridge_leave(ds, info); + return 0; } /* Matches for all upstream-facing ports (the CPU port and all upstream-facing diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 114f663332d0..c6555003f5df 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -110,6 +110,15 @@ int dsa_8021q_rx_source_port(u16 vid) } EXPORT_SYMBOL_GPL(dsa_8021q_rx_source_port); +/* Returns the decoded VBID from the RX VID. 
*/ +static int dsa_tag_8021q_rx_vbid(u16 vid) +{ + u16 vbid_hi = (vid & DSA_8021Q_VBID_HI_MASK) >> DSA_8021Q_VBID_HI_SHIFT; + u16 vbid_lo = (vid & DSA_8021Q_VBID_LO_MASK) >> DSA_8021Q_VBID_LO_SHIFT; + + return (vbid_hi << 2) | vbid_lo; +} + bool vid_is_dsa_8021q_rxvlan(u16 vid) { return (vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_RX; @@ -244,11 +253,17 @@ int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds, if (dsa_port_is_user(dp)) flags |= BRIDGE_VLAN_INFO_UNTAGGED; + /* Standalone VLANs are PVIDs */ if (vid_is_dsa_8021q_rxvlan(info->vid) && dsa_8021q_rx_switch_id(info->vid) == ds->index && dsa_8021q_rx_source_port(info->vid) == dp->index) flags |= BRIDGE_VLAN_INFO_PVID; + /* And bridging VLANs are PVIDs too on user ports */ + if (dsa_tag_8021q_rx_vbid(info->vid) && + dsa_port_is_user(dp)) + flags |= BRIDGE_VLAN_INFO_PVID; + err = dsa_port_do_tag_8021q_vlan_add(dp, info->vid, flags); if (err) @@ -326,107 +341,52 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds, * +-+-----+-+-----+-+-----+-+-----+-+ +-+-----+-+-----+-+-----+-+-----+-+ * swp0 swp1 swp2 swp3 swp0 swp1 swp2 swp3 */ -static bool -dsa_port_tag_8021q_bridge_match(struct dsa_port *dp, - struct dsa_notifier_bridge_info *info) +int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) { - /* Don't match on self */ - if (dp->ds->dst->index == info->tree_index && - dp->ds->index == info->sw_index && - dp->index == info->port) - return false; - - if (dsa_port_is_user(dp)) - return dsa_port_offloads_bridge(dp, &info->bridge); - - return false; -} - -int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, - struct dsa_notifier_bridge_info *info) -{ - struct dsa_switch *targeted_ds; - struct dsa_port *targeted_dp; - struct dsa_port *dp; - u16 targeted_rx_vid; + struct dsa_port *dp = dsa_to_port(ds, port); + u16 standalone_vid, bridge_vid; int err; - if (!ds->tag_8021q_ctx) - return 0; - - targeted_ds = dsa_switch_find(info->tree_index, info->sw_index); - targeted_dp = dsa_to_port(targeted_ds, info->port); - targeted_rx_vid = dsa_tag_8021q_rx_vid(targeted_dp); - - dsa_switch_for_each_port(dp, ds) { - u16 rx_vid = dsa_tag_8021q_rx_vid(dp); - - if (!dsa_port_tag_8021q_bridge_match(dp, info)) - continue; + /* Delete the standalone VLAN of the port and replace it with a + * bridging VLAN + */ + standalone_vid = dsa_tag_8021q_rx_vid(dp); + bridge_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); - /* Install the RX VID of the targeted port in our VLAN table */ - err = dsa_port_tag_8021q_vlan_add(dp, targeted_rx_vid, true); - if (err) - return err; + err = dsa_port_tag_8021q_vlan_add(dp, bridge_vid, true); + if (err) + return err; - /* Install our RX VID into the targeted port's VLAN table */ - err = dsa_port_tag_8021q_vlan_add(targeted_dp, rx_vid, true); - if (err) - return err; - } + dsa_port_tag_8021q_vlan_del(dp, standalone_vid, false); return 0; } +EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_join); -int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, - struct dsa_notifier_bridge_info *info) +void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) { - struct dsa_switch *targeted_ds; - struct dsa_port *targeted_dp; - struct dsa_port *dp; - u16 targeted_rx_vid; - - if (!ds->tag_8021q_ctx) - return 0; - - targeted_ds = dsa_switch_find(info->tree_index, info->sw_index); - targeted_dp = dsa_to_port(targeted_ds, info->port); - targeted_rx_vid = dsa_tag_8021q_rx_vid(targeted_dp); - - dsa_switch_for_each_port(dp, ds) { - u16 rx_vid = dsa_tag_8021q_rx_vid(dp); - - if 
(!dsa_port_tag_8021q_bridge_match(dp, info)) - continue; + struct dsa_port *dp = dsa_to_port(ds, port); + u16 standalone_vid, bridge_vid; + int err; - /* Remove the RX VID of the targeted port from our VLAN table */ - dsa_port_tag_8021q_vlan_del(dp, targeted_rx_vid, true); + /* Delete the bridging VLAN of the port and replace it with a + * standalone VLAN + */ + standalone_vid = dsa_tag_8021q_rx_vid(dp); + bridge_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); - /* Remove our RX VID from the targeted port's VLAN table */ - dsa_port_tag_8021q_vlan_del(targeted_dp, rx_vid, true); + err = dsa_port_tag_8021q_vlan_add(dp, standalone_vid, false); + if (err) { + dev_err(ds->dev, + "Failed to delete tag_8021q standalone VLAN %d from port %d: %pe\n", + standalone_vid, port, ERR_PTR(err)); } - return 0; -} - -int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, - struct dsa_bridge bridge) -{ - u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); - - return dsa_port_tag_8021q_vlan_add(dsa_to_port(ds, port), tx_vid, - true); -} -EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_offload); - -void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, - struct dsa_bridge bridge) -{ - u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); - - dsa_port_tag_8021q_vlan_del(dsa_to_port(ds, port), tx_vid, true); + dsa_port_tag_8021q_vlan_del(dp, bridge_vid, true); } -EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_unoffload); +EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_leave); /* Set up a port's tag_8021q RX and TX VLAN for standalone mode operation */ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port) -- cgit v1.2.3-71-gd317 From d7f9787a763f35225287aedb9364c972ae128d18 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 25 Feb 2022 11:22:17 +0200 Subject: net: dsa: tag_8021q: add support for imprecise RX based on the VBID The sja1105 switch can't populate the PORT field of the tag_8021q header when sending a frame to the CPU with a non-zero VBID. Similar to dsa_find_designated_bridge_port_by_vid() which performs imprecise RX for VLAN-aware bridges, let's introduce a helper in tag_8021q for performing imprecise RX based on the VLAN that it has allocated for a VLAN-unaware bridge. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- include/linux/dsa/8021q.h | 6 +++++- net/dsa/tag_8021q.c | 38 ++++++++++++++++++++++++++++++++++++-- net/dsa/tag_ocelot_8021q.c | 2 +- net/dsa/tag_sja1105.c | 22 +++++++++++++--------- 4 files changed, 55 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index f47f227baa27..92f5243b841e 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -41,7 +41,11 @@ void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port, struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); -void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id); +void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, + int *vbid); + +struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master, + int vbid); u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num); diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index c6555003f5df..1cf245a6f18e 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -32,7 +32,7 @@ * VBID - { VID[9], VID[5:4] }: * Virtual bridge ID. 
If between 1 and 7, packet targets the broadcast * domain of a bridge. If transmitted as zero, packet targets a single - * port. Field only valid on transmit, must be ignored on receive. + * port. * * PORT - VID[3:0]: * Index of switch port. Must be between 0 and 15. @@ -533,7 +533,37 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, } EXPORT_SYMBOL_GPL(dsa_8021q_xmit); -void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id) +struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master, + int vbid) +{ + struct dsa_port *cpu_dp = master->dsa_ptr; + struct dsa_switch_tree *dst = cpu_dp->dst; + struct dsa_port *dp; + + if (WARN_ON(!vbid)) + return NULL; + + dsa_tree_for_each_user_port(dp, dst) { + if (!dp->bridge) + continue; + + if (dp->stp_state != BR_STATE_LEARNING && + dp->stp_state != BR_STATE_FORWARDING) + continue; + + if (dp->cpu_dp != cpu_dp) + continue; + + if (dsa_port_bridge_num_get(dp) == vbid) + return dp->slave; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_port_by_vbid); + +void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, + int *vbid) { u16 vid, tci; @@ -550,6 +580,10 @@ void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id) *source_port = dsa_8021q_rx_source_port(vid); *switch_id = dsa_8021q_rx_switch_id(vid); + + if (vbid) + *vbid = dsa_tag_8021q_rx_vbid(vid); + skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } EXPORT_SYMBOL_GPL(dsa_8021q_rcv); diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c index bd6f1d0e5372..1144a87ad0db 100644 --- a/net/dsa/tag_ocelot_8021q.c +++ b/net/dsa/tag_ocelot_8021q.c @@ -77,7 +77,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb, { int src_port, switch_id; - dsa_8021q_rcv(skb, &src_port, &switch_id); + dsa_8021q_rcv(skb, &src_port, &switch_id, NULL); skb->dev = dsa_master_find_slave(netdev, switch_id, src_port); if (!skb->dev) diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 72d5e0ef8dcf..9c5c00980b06 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -509,7 +509,7 @@ static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb) * packet. */ static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port, - int *switch_id, u16 *vid) + int *switch_id, int *vbid, u16 *vid) { struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb); u16 vlan_tci; @@ -519,8 +519,8 @@ static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port, else vlan_tci = ntohs(hdr->h_vlan_TCI); - if (vid_is_dsa_8021q_rxvlan(vlan_tci & VLAN_VID_MASK)) - return dsa_8021q_rcv(skb, source_port, switch_id); + if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK)) + return dsa_8021q_rcv(skb, source_port, switch_id, vbid); /* Try our best with imprecise RX */ *vid = vlan_tci & VLAN_VID_MASK; @@ -529,7 +529,7 @@ static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port, static struct sk_buff *sja1105_rcv(struct sk_buff *skb, struct net_device *netdev) { - int source_port = -1, switch_id = -1; + int source_port = -1, switch_id = -1, vbid = -1; struct sja1105_meta meta = {0}; struct ethhdr *hdr; bool is_link_local; @@ -542,7 +542,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, if (sja1105_skb_has_tag_8021q(skb)) { /* Normal traffic path. */ - sja1105_vlan_rcv(skb, &source_port, &switch_id, &vid); + sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid); } else if (is_link_local) { /* Management traffic path. 
Switch embeds the switch ID and * port ID into bytes of the destination MAC, courtesy of @@ -561,7 +561,9 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, return NULL; } - if (source_port == -1 || switch_id == -1) + if (vbid >= 1) + skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid); + else if (source_port == -1 || switch_id == -1) skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid); else skb->dev = dsa_master_find_slave(netdev, switch_id, source_port); @@ -686,7 +688,7 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb, static struct sk_buff *sja1110_rcv(struct sk_buff *skb, struct net_device *netdev) { - int source_port = -1, switch_id = -1; + int source_port = -1, switch_id = -1, vbid = -1; bool host_only = false; u16 vid = 0; @@ -700,9 +702,11 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb, /* Packets with in-band control extensions might still have RX VLANs */ if (likely(sja1105_skb_has_tag_8021q(skb))) - sja1105_vlan_rcv(skb, &source_port, &switch_id, &vid); + sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid); - if (source_port == -1 || switch_id == -1) + if (vbid >= 1) + skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid); + else if (source_port == -1 || switch_id == -1) skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid); else skb->dev = dsa_master_find_slave(netdev, switch_id, source_port); -- cgit v1.2.3-71-gd317 From 04b67e18ce5b29785578397f6785f28f512d64aa Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 25 Feb 2022 11:22:20 +0200 Subject: net: dsa: tag_8021q: merge RX and TX VLANs In the old Shared VLAN Learning mode of operation that tag_8021q previously used for forwarding, we needed to have distinct concepts for an RX and a TX VLAN. An RX VLAN could be installed on all ports that were members of a given bridge, so that autonomous forwarding could still work, while a TX VLAN was dedicated for precise packet steering, so it just contained the CPU port and one egress port. Now that tag_8021q uses Independent VLAN Learning and imprecise RX/TX all over, those lines have been blurred and we no longer have the need to do precise TX towards a port that is in a bridge. As for standalone ports, it is fine to use the same VLAN ID for both RX and TX. This patch changes the tag_8021q format by shifting the VLAN range it reserves, and halving it. Previously, our DIR bits were encoding the VLAN direction (RX/TX) and were set to either 1 or 2. This meant that tag_8021q reserved 2K VLANs, or 50% of the available range. Change the DIR bits to a hardcoded value of 3 now, which makes tag_8021q reserve only 1K VLANs, and a different range now (the last 1K). This is done so that we leave the old format in place in case we need to return to it. In terms of code, the vid_is_dsa_8021q_rxvlan and vid_is_dsa_8021q_txvlan functions go away. Any vid_is_dsa_8021q is both a TX and an RX VLAN, and they are no longer distinct. For example, felix which did different things for different VLAN types, now needs to handle the RX and the TX logic for the same VLAN. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 120 ++++++++++++----------- drivers/net/dsa/sja1105/sja1105_main.c | 2 +- drivers/net/dsa/sja1105/sja1105_vl.c | 3 +- include/linux/dsa/8021q.h | 8 +- net/dsa/tag_8021q.c | 169 +++++++++------------------------ net/dsa/tag_ocelot_8021q.c | 2 +- net/dsa/tag_sja1105.c | 4 +- 7 files changed, 115 insertions(+), 193 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 57d6e62d7a48..33037ee305b4 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -25,8 +25,10 @@ #include #include "felix.h" -static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, - bool pvid, bool untagged) +/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that + * the tagger can perform RX source port identification. + */ +static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid) { struct ocelot_vcap_filter *outer_tagging_rule; struct ocelot *ocelot = &felix->ocelot; @@ -64,21 +66,32 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, return err; } -static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, - bool pvid, bool untagged) +static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid) +{ + struct ocelot_vcap_filter *outer_tagging_rule; + struct ocelot_vcap_block *block_vcap_es0; + struct ocelot *ocelot = &felix->ocelot; + + block_vcap_es0 = &ocelot->block[VCAP_ES0]; + + outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, + port, false); + if (!outer_tagging_rule) + return -ENOENT; + + return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); +} + +/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2 + * rules for steering those tagged packets towards the correct destination port + */ +static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; struct ocelot *ocelot = &felix->ocelot; struct dsa_switch *ds = felix->ds; int upstream, err; - /* tag_8021q.c assumes we are implementing this via port VLAN - * membership, which we aren't. So we don't need to add any VCAP filter - * for the CPU port. 
- */ - if (ocelot->ports[port]->is_dsa_8021q_cpu) - return 0; - untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!untagging_rule) return -ENOMEM; @@ -135,41 +148,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, return 0; } -static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, - u16 flags) -{ - bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = flags & BRIDGE_VLAN_INFO_PVID; - struct ocelot *ocelot = ds->priv; - - if (vid_is_dsa_8021q_rxvlan(vid)) - return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot), - port, vid, pvid, untagged); - - if (vid_is_dsa_8021q_txvlan(vid)) - return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot), - port, vid, pvid, untagged); - - return 0; -} - -static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid) -{ - struct ocelot_vcap_filter *outer_tagging_rule; - struct ocelot_vcap_block *block_vcap_es0; - struct ocelot *ocelot = &felix->ocelot; - - block_vcap_es0 = &ocelot->block[VCAP_ES0]; - - outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, - port, false); - if (!outer_tagging_rule) - return -ENOENT; - - return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); -} - -static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid) +static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; struct ocelot_vcap_block *block_vcap_is1; @@ -177,9 +156,6 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid) struct ocelot *ocelot = &felix->ocelot; int err; - if (ocelot->ports[port]->is_dsa_8021q_cpu) - return 0; - block_vcap_is1 = &ocelot->block[VCAP_IS1]; block_vcap_is2 = &ocelot->block[VCAP_IS2]; @@ -195,22 +171,54 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid) redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, port, false); if (!redirect_rule) - return 0; + return -ENOENT; return ocelot_vcap_filter_del(ocelot, redirect_rule); } +static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, + u16 flags) +{ + struct ocelot *ocelot = ds->priv; + int err; + + /* tag_8021q.c assumes we are implementing this via port VLAN + * membership, which we aren't. So we don't need to add any VCAP filter + * for the CPU port. 
+ */ + if (!dsa_is_user_port(ds, port)) + return 0; + + err = felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid); + if (err) + return err; + + err = felix_tag_8021q_vlan_add_tx(ocelot_to_felix(ocelot), port, vid); + if (err) { + felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid); + return err; + } + + return 0; +} + static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) { struct ocelot *ocelot = ds->priv; + int err; + + if (!dsa_is_user_port(ds, port)) + return 0; - if (vid_is_dsa_8021q_rxvlan(vid)) - return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot), - port, vid); + err = felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid); + if (err) + return err; - if (vid_is_dsa_8021q_txvlan(vid)) - return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot), - port, vid); + err = felix_tag_8021q_vlan_del_tx(ocelot_to_felix(ocelot), port, vid); + if (err) { + felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid); + return err; + } return 0; } diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index ab196dc615da..abc67b97bfc4 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -2509,7 +2509,7 @@ static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port, */ if (vid_is_dsa_8021q(vlan->vid)) { NL_SET_ERR_MSG_MOD(extack, - "Range 1024-3071 reserved for dsa_8021q operation"); + "Range 3072-4095 reserved for dsa_8021q operation"); return -EBUSY; } diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index 14e6dd7fb103..1eef60207c6b 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -302,7 +302,7 @@ static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp) unsigned long bridge_num; if (!dp->bridge) - return dsa_tag_8021q_rx_vid(dp); + return dsa_tag_8021q_standalone_vid(dp); bridge_num = dsa_port_bridge_num_get(dp); @@ -407,6 +407,7 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv, vl_lookup[k].vlanid = rule->key.vl.vid; vl_lookup[k].vlanprior = rule->key.vl.pcp; } else { + /* FIXME */ struct dsa_port *dp = dsa_to_port(priv->ds, port); u16 vid = sja1105_port_get_tag_8021q_vid(dp); diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 92f5243b841e..b4e2862633f6 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -49,18 +49,12 @@ struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master, u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num); -u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp); - -u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp); +u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp); int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); -bool vid_is_dsa_8021q_rxvlan(u16 vid); - -bool vid_is_dsa_8021q_txvlan(u16 vid); - bool vid_is_dsa_8021q(u16 vid); #endif /* _NET_DSA_8021Q_H */ diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 1cf245a6f18e..eac43f5b4e07 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -16,15 +16,11 @@ * * | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | * +-----------+-----+-----------------+-----------+-----------------------+ - * | DIR | VBID| SWITCH_ID | VBID | PORT | + * | RSV | VBID| SWITCH_ID | VBID | PORT | * +-----------+-----+-----------------+-----------+-----------------------+ * - * DIR - VID[11:10]: - * Direction flags. 
- * * 1 (0b01) for RX VLAN, - * * 2 (0b10) for TX VLAN. - * These values make the special VIDs of 0, 1 and 4095 to be left - * unused by this coding scheme. + * RSV - VID[11:10]: + * Reserved. Must be set to 3 (0b11). * * SWITCH_ID - VID[8:6]: * Index of switch within DSA tree. Must be between 0 and 7. @@ -38,12 +34,11 @@ * Index of switch port. Must be between 0 and 15. */ -#define DSA_8021Q_DIR_SHIFT 10 -#define DSA_8021Q_DIR_MASK GENMASK(11, 10) -#define DSA_8021Q_DIR(x) (((x) << DSA_8021Q_DIR_SHIFT) & \ - DSA_8021Q_DIR_MASK) -#define DSA_8021Q_DIR_RX DSA_8021Q_DIR(1) -#define DSA_8021Q_DIR_TX DSA_8021Q_DIR(2) +#define DSA_8021Q_RSV_VAL 3 +#define DSA_8021Q_RSV_SHIFT 10 +#define DSA_8021Q_RSV_MASK GENMASK(11, 10) +#define DSA_8021Q_RSV ((DSA_8021Q_RSV_VAL << DSA_8021Q_RSV_SHIFT) & \ + DSA_8021Q_RSV_MASK) #define DSA_8021Q_SWITCH_ID_SHIFT 6 #define DSA_8021Q_SWITCH_ID_MASK GENMASK(8, 6) @@ -72,29 +67,19 @@ u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num) /* The VBID value of 0 is reserved for precise TX, but it is also * reserved/invalid for the bridge_num, so all is well. */ - return DSA_8021Q_DIR_TX | DSA_8021Q_VBID(bridge_num); + return DSA_8021Q_RSV | DSA_8021Q_VBID(bridge_num); } EXPORT_SYMBOL_GPL(dsa_8021q_bridge_tx_fwd_offload_vid); -/* Returns the VID to be inserted into the frame from xmit for switch steering - * instructions on egress. Encodes switch ID and port ID. - */ -u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp) -{ - return DSA_8021Q_DIR_TX | DSA_8021Q_SWITCH_ID(dp->ds->index) | - DSA_8021Q_PORT(dp->index); -} -EXPORT_SYMBOL_GPL(dsa_tag_8021q_tx_vid); - /* Returns the VID that will be installed as pvid for this switch port, sent as * tagged egress towards the CPU port and decoded by the rcv function. */ -u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp) +u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp) { - return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(dp->ds->index) | + return DSA_8021Q_RSV | DSA_8021Q_SWITCH_ID(dp->ds->index) | DSA_8021Q_PORT(dp->index); } -EXPORT_SYMBOL_GPL(dsa_tag_8021q_rx_vid); +EXPORT_SYMBOL_GPL(dsa_tag_8021q_standalone_vid); /* Returns the decoded switch ID from the RX VID. 
*/ int dsa_8021q_rx_switch_id(u16 vid) @@ -119,21 +104,11 @@ static int dsa_tag_8021q_rx_vbid(u16 vid) return (vbid_hi << 2) | vbid_lo; } -bool vid_is_dsa_8021q_rxvlan(u16 vid) -{ - return (vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_RX; -} -EXPORT_SYMBOL_GPL(vid_is_dsa_8021q_rxvlan); - -bool vid_is_dsa_8021q_txvlan(u16 vid) -{ - return (vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_TX; -} -EXPORT_SYMBOL_GPL(vid_is_dsa_8021q_txvlan); - bool vid_is_dsa_8021q(u16 vid) { - return vid_is_dsa_8021q_rxvlan(vid) || vid_is_dsa_8021q_txvlan(vid); + u16 rsv = (vid & DSA_8021Q_RSV_MASK) >> DSA_8021Q_RSV_SHIFT; + + return rsv == DSA_8021Q_RSV_VAL; } EXPORT_SYMBOL_GPL(vid_is_dsa_8021q); @@ -251,18 +226,8 @@ int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds, u16 flags = 0; if (dsa_port_is_user(dp)) - flags |= BRIDGE_VLAN_INFO_UNTAGGED; - - /* Standalone VLANs are PVIDs */ - if (vid_is_dsa_8021q_rxvlan(info->vid) && - dsa_8021q_rx_switch_id(info->vid) == ds->index && - dsa_8021q_rx_source_port(info->vid) == dp->index) - flags |= BRIDGE_VLAN_INFO_PVID; - - /* And bridging VLANs are PVIDs too on user ports */ - if (dsa_tag_8021q_rx_vbid(info->vid) && - dsa_port_is_user(dp)) - flags |= BRIDGE_VLAN_INFO_PVID; + flags |= BRIDGE_VLAN_INFO_UNTAGGED | + BRIDGE_VLAN_INFO_PVID; err = dsa_port_do_tag_8021q_vlan_add(dp, info->vid, flags); @@ -294,52 +259,24 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds, return 0; } -/* RX VLAN tagging (left) and TX VLAN tagging (right) setup shown for a single - * front-panel switch port (here swp0). +/* There are 2 ways of offloading tag_8021q VLANs. * - * Port identification through VLAN (802.1Q) tags has different requirements - * for it to work effectively: - * - On RX (ingress from network): each front-panel port must have a pvid - * that uniquely identifies it, and the egress of this pvid must be tagged - * towards the CPU port, so that software can recover the source port based - * on the VID in the frame. But this would only work for standalone ports; - * if bridged, this VLAN setup would break autonomous forwarding and would - * force all switched traffic to pass through the CPU. So we must also make - * the other front-panel ports members of this VID we're adding, albeit - * we're not making it their PVID (they'll still have their own). - * - On TX (ingress from CPU and towards network) we are faced with a problem. - * If we were to tag traffic (from within DSA) with the port's pvid, all - * would be well, assuming the switch ports were standalone. Frames would - * have no choice but to be directed towards the correct front-panel port. - * But because we also want the RX VLAN to not break bridging, then - * inevitably that means that we have to give them a choice (of what - * front-panel port to go out on), and therefore we cannot steer traffic - * based on the RX VID. So what we do is simply install one more VID on the - * front-panel and CPU ports, and profit off of the fact that steering will - * work just by virtue of the fact that there is only one other port that's - * a member of the VID we're tagging the traffic with - the desired one. + * One is to use a hardware TCAM to push the port's standalone VLAN into the + * frame when forwarding it to the CPU, as an egress modification rule on the + * CPU port. This is preferable because it has no side effects for the + * autonomous forwarding path, and accomplishes tag_8021q's primary goal of + * identifying the source port of each packet based on VLAN ID. 
* - * So at the end, each front-panel port will have one RX VID (also the PVID), - * the RX VID of all other front-panel ports that are in the same bridge, and - * one TX VID. Whereas the CPU port will have the RX and TX VIDs of all - * front-panel ports, and on top of that, is also tagged-input and - * tagged-output (VLAN trunk). + * The other is to commit the tag_8021q VLAN as a PVID to the VLAN table, and + * to configure the port as VLAN-unaware. This is less preferable because + * unique source port identification can only be done for standalone ports; + * under a VLAN-unaware bridge, all ports share the same tag_8021q VLAN as + * PVID, and under a VLAN-aware bridge, packets received by software will not + * have tag_8021q VLANs appended, just bridge VLANs. * - * CPU port CPU port - * +-------------+-----+-------------+ +-------------+-----+-------------+ - * | RX VID | | | | TX VID | | | - * | of swp0 | | | | of swp0 | | | - * | +-----+ | | +-----+ | - * | ^ T | | | Tagged | - * | | | | | ingress | - * | +-------+---+---+-------+ | | +-----------+ | - * | | | | | | | | Untagged | - * | | U v U v U v | | v egress | - * | +-----+ +-----+ +-----+ +-----+ | | +-----+ +-----+ +-----+ +-----+ | - * | | | | | | | | | | | | | | | | | | | | - * | |PVID | | | | | | | | | | | | | | | | | | - * +-+-----+-+-----+-+-----+-+-----+-+ +-+-----+-+-----+-+-----+-+-----+-+ - * swp0 swp1 swp2 swp3 swp0 swp1 swp2 swp3 + * For tag_8021q implementations of the second type, this method is used to + * replace the standalone tag_8021q VLAN of a port with the tag_8021q VLAN to + * be used for VLAN-unaware bridging. */ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge) @@ -351,7 +288,7 @@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, /* Delete the standalone VLAN of the port and replace it with a * bridging VLAN */ - standalone_vid = dsa_tag_8021q_rx_vid(dp); + standalone_vid = dsa_tag_8021q_standalone_vid(dp); bridge_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); err = dsa_port_tag_8021q_vlan_add(dp, bridge_vid, true); @@ -374,7 +311,7 @@ void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port, /* Delete the bridging VLAN of the port and replace it with a * standalone VLAN */ - standalone_vid = dsa_tag_8021q_rx_vid(dp); + standalone_vid = dsa_tag_8021q_standalone_vid(dp); bridge_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); err = dsa_port_tag_8021q_vlan_add(dp, standalone_vid, false); @@ -388,13 +325,12 @@ void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port, } EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_leave); -/* Set up a port's tag_8021q RX and TX VLAN for standalone mode operation */ +/* Set up a port's standalone tag_8021q VLAN */ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port) { struct dsa_8021q_context *ctx = ds->tag_8021q_ctx; struct dsa_port *dp = dsa_to_port(ds, port); - u16 rx_vid = dsa_tag_8021q_rx_vid(dp); - u16 tx_vid = dsa_tag_8021q_tx_vid(dp); + u16 vid = dsa_tag_8021q_standalone_vid(dp); struct net_device *master; int err; @@ -406,30 +342,16 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port) master = dp->cpu_dp->master; - /* Add this user port's RX VID to the membership list of all others - * (including itself). This is so that bridging will not be hindered. - * L2 forwarding rules still take precedence when there are no VLAN - * restrictions, so there are no concerns about leaking traffic. 
- */ - err = dsa_port_tag_8021q_vlan_add(dp, rx_vid, false); + err = dsa_port_tag_8021q_vlan_add(dp, vid, false); if (err) { dev_err(ds->dev, - "Failed to apply RX VID %d to port %d: %pe\n", - rx_vid, port, ERR_PTR(err)); + "Failed to apply standalone VID %d to port %d: %pe\n", + vid, port, ERR_PTR(err)); return err; } - /* Add @rx_vid to the master's RX filter. */ - vlan_vid_add(master, ctx->proto, rx_vid); - - /* Finally apply the TX VID on this port and on the CPU port */ - err = dsa_port_tag_8021q_vlan_add(dp, tx_vid, false); - if (err) { - dev_err(ds->dev, - "Failed to apply TX VID %d on port %d: %pe\n", - tx_vid, port, ERR_PTR(err)); - return err; - } + /* Add the VLAN to the master's RX filter. */ + vlan_vid_add(master, ctx->proto, vid); return err; } @@ -438,8 +360,7 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port) { struct dsa_8021q_context *ctx = ds->tag_8021q_ctx; struct dsa_port *dp = dsa_to_port(ds, port); - u16 rx_vid = dsa_tag_8021q_rx_vid(dp); - u16 tx_vid = dsa_tag_8021q_tx_vid(dp); + u16 vid = dsa_tag_8021q_standalone_vid(dp); struct net_device *master; /* The CPU port is implicitly configured by @@ -450,11 +371,9 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port) master = dp->cpu_dp->master; - dsa_port_tag_8021q_vlan_del(dp, rx_vid, false); - - vlan_vid_del(master, ctx->proto, rx_vid); + dsa_port_tag_8021q_vlan_del(dp, vid, false); - dsa_port_tag_8021q_vlan_del(dp, tx_vid, false); + vlan_vid_del(master, ctx->proto, vid); } static int dsa_tag_8021q_setup(struct dsa_switch *ds) diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c index 1144a87ad0db..37ccf00404ea 100644 --- a/net/dsa/tag_ocelot_8021q.c +++ b/net/dsa/tag_ocelot_8021q.c @@ -62,7 +62,7 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, struct dsa_port *dp = dsa_slave_to_port(netdev); u16 queue_mapping = skb_get_queue_mapping(skb); u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); - u16 tx_vid = dsa_tag_8021q_tx_vid(dp); + u16 tx_vid = dsa_tag_8021q_standalone_vid(dp); struct ethhdr *hdr = eth_hdr(skb); if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest)) diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 9c5c00980b06..f3832ac54098 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -267,7 +267,7 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb, struct dsa_port *dp = dsa_slave_to_port(netdev); u16 queue_mapping = skb_get_queue_mapping(skb); u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); - u16 tx_vid = dsa_tag_8021q_tx_vid(dp); + u16 tx_vid = dsa_tag_8021q_standalone_vid(dp); if (skb->offload_fwd_mark) return sja1105_imprecise_xmit(skb, netdev); @@ -295,7 +295,7 @@ static struct sk_buff *sja1110_xmit(struct sk_buff *skb, struct dsa_port *dp = dsa_slave_to_port(netdev); u16 queue_mapping = skb_get_queue_mapping(skb); u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); - u16 tx_vid = dsa_tag_8021q_tx_vid(dp); + u16 tx_vid = dsa_tag_8021q_standalone_vid(dp); __be32 *tx_trailer; __be16 *tx_header; int trailer_pos; -- cgit v1.2.3-71-gd317 From b6362bdf750b4ba266d4a10156174ec52460e73d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 25 Feb 2022 11:22:21 +0200 Subject: net: dsa: tag_8021q: rename dsa_8021q_bridge_tx_fwd_offload_vid The dsa_8021q_bridge_tx_fwd_offload_vid is no longer used just for bridge TX forwarding offload, it is the private VLAN reserved for VLAN-unaware bridging in a way that is compatible with FDB isolation. So just rename it dsa_tag_8021q_bridge_vid. 
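For reference, a small stand-alone sketch of the VIDs this renamed helper produces under the new layout (RSV = 0b11 in VID[11:10], VBID split across VID[9] and VID[5:4]); the macro names here are local to the sketch:

#include <stdio.h>

/* Mirrors the tag_8021q bit layout from this series:
 * RSV = 3 in VID[11:10], VBID = { VID[9], VID[5:4] }.
 */
#define RSV		(3u << 10)
#define VBID_HI(x)	((((x) >> 2) & 0x1) << 9)
#define VBID_LO(x)	(((x) & 0x3) << 4)

static unsigned int bridge_vid(unsigned int bridge_num)
{
	return RSV | VBID_HI(bridge_num) | VBID_LO(bridge_num);
}

int main(void)
{
	/* bridge_num 0 is reserved; valid values are 1..7 */
	for (unsigned int b = 1; b <= 7; b++)
		printf("bridge_num %u -> VID %u\n", b, bridge_vid(b));
	return 0;	/* all VIDs land in the reserved 3072-4095 range */
}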
Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/sja1105/sja1105_vl.c | 2 +- include/linux/dsa/8021q.h | 2 +- net/dsa/tag_8021q.c | 8 ++++---- net/dsa/tag_sja1105.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index 1eef60207c6b..b7e95d60a6e4 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -306,7 +306,7 @@ static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp) bridge_num = dsa_port_bridge_num_get(dp); - return dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); + return dsa_tag_8021q_bridge_vid(bridge_num); } static int sja1105_init_virtual_links(struct sja1105_private *priv, diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index b4e2862633f6..3ed117e299ec 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -47,7 +47,7 @@ void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master, int vbid); -u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num); +u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num); u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp); diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index eac43f5b4e07..a786569203f0 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -62,14 +62,14 @@ #define DSA_8021Q_PORT(x) (((x) << DSA_8021Q_PORT_SHIFT) & \ DSA_8021Q_PORT_MASK) -u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num) +u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num) { /* The VBID value of 0 is reserved for precise TX, but it is also * reserved/invalid for the bridge_num, so all is well. */ return DSA_8021Q_RSV | DSA_8021Q_VBID(bridge_num); } -EXPORT_SYMBOL_GPL(dsa_8021q_bridge_tx_fwd_offload_vid); +EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_vid); /* Returns the VID that will be installed as pvid for this switch port, sent as * tagged egress towards the CPU port and decoded by the rcv function. @@ -289,7 +289,7 @@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, * bridging VLAN */ standalone_vid = dsa_tag_8021q_standalone_vid(dp); - bridge_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); + bridge_vid = dsa_tag_8021q_bridge_vid(bridge.num); err = dsa_port_tag_8021q_vlan_add(dp, bridge_vid, true); if (err) @@ -312,7 +312,7 @@ void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port, * standalone VLAN */ standalone_vid = dsa_tag_8021q_standalone_vid(dp); - bridge_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); + bridge_vid = dsa_tag_8021q_bridge_vid(bridge.num); err = dsa_port_tag_8021q_vlan_add(dp, standalone_vid, false); if (err) { diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index f3832ac54098..83e4136516b0 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -226,7 +226,7 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb, * TX VLAN that targets the bridge's entire broadcast domain, * instead of just the specific port. 
*/ - tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); + tx_vid = dsa_tag_8021q_bridge_vid(bridge_num); return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid); } -- cgit v1.2.3-71-gd317 From bc437f7515f5e14aec9f2801412d9ea48116a97d Mon Sep 17 00:00:00 2001 From: Liam Beguin Date: Sat, 12 Feb 2022 21:57:30 -0500 Subject: iio: afe: rescale: expose scale processing function In preparation for the addition of kunit tests, expose the logic responsible for combining channel scales. Signed-off-by: Liam Beguin Reviewed-by: Peter Rosin Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220213025739.2561834-2-liambeguin@gmail.com Signed-off-by: Jonathan Cameron --- drivers/iio/afe/iio-rescale.c | 65 ++++++++++++++++++----------------------- include/linux/iio/afe/rescale.h | 32 ++++++++++++++++++++ 2 files changed, 60 insertions(+), 37 deletions(-) create mode 100644 include/linux/iio/afe/rescale.h (limited to 'include/linux') diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c index 69710c481376..65832dd09249 100644 --- a/drivers/iio/afe/iio-rescale.c +++ b/drivers/iio/afe/iio-rescale.c @@ -15,32 +15,43 @@ #include #include +#include #include #include -struct rescale; - -struct rescale_cfg { - enum iio_chan_type type; - int (*props)(struct device *dev, struct rescale *rescale); -}; +int rescale_process_scale(struct rescale *rescale, int scale_type, + int *val, int *val2) +{ + s64 tmp; -struct rescale { - const struct rescale_cfg *cfg; - struct iio_channel *source; - struct iio_chan_spec chan; - struct iio_chan_spec_ext_info *ext_info; - bool chan_processed; - s32 numerator; - s32 denominator; -}; + switch (scale_type) { + case IIO_VAL_FRACTIONAL: + *val *= rescale->numerator; + *val2 *= rescale->denominator; + return scale_type; + case IIO_VAL_INT: + *val *= rescale->numerator; + if (rescale->denominator == 1) + return scale_type; + *val2 = rescale->denominator; + return IIO_VAL_FRACTIONAL; + case IIO_VAL_FRACTIONAL_LOG2: + tmp = (s64)*val * 1000000000LL; + tmp = div_s64(tmp, rescale->denominator); + tmp *= rescale->numerator; + tmp = div_s64(tmp, 1000000000LL); + *val = tmp; + return scale_type; + default: + return -EOPNOTSUPP; + } +} static int rescale_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct rescale *rescale = iio_priv(indio_dev); - s64 tmp; int ret; switch (mask) { @@ -66,27 +77,7 @@ static int rescale_read_raw(struct iio_dev *indio_dev, } else { ret = iio_read_channel_scale(rescale->source, val, val2); } - switch (ret) { - case IIO_VAL_FRACTIONAL: - *val *= rescale->numerator; - *val2 *= rescale->denominator; - return ret; - case IIO_VAL_INT: - *val *= rescale->numerator; - if (rescale->denominator == 1) - return ret; - *val2 = rescale->denominator; - return IIO_VAL_FRACTIONAL; - case IIO_VAL_FRACTIONAL_LOG2: - tmp = (s64)*val * 1000000000LL; - tmp = div_s64(tmp, rescale->denominator); - tmp *= rescale->numerator; - tmp = div_s64(tmp, 1000000000LL); - *val = tmp; - return ret; - default: - return -EOPNOTSUPP; - } + return rescale_process_scale(rescale, ret, val, val2); default: return -EINVAL; } diff --git a/include/linux/iio/afe/rescale.h b/include/linux/iio/afe/rescale.h new file mode 100644 index 000000000000..8a2eb34af327 --- /dev/null +++ b/include/linux/iio/afe/rescale.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2018 Axentia Technologies AB + */ + +#ifndef __IIO_RESCALE_H__ +#define __IIO_RESCALE_H__ + +#include +#include + 
+struct device; +struct rescale; + +struct rescale_cfg { + enum iio_chan_type type; + int (*props)(struct device *dev, struct rescale *rescale); +}; + +struct rescale { + const struct rescale_cfg *cfg; + struct iio_channel *source; + struct iio_chan_spec chan; + struct iio_chan_spec_ext_info *ext_info; + bool chan_processed; + s32 numerator; + s32 denominator; +}; + +int rescale_process_scale(struct rescale *rescale, int scale_type, + int *val, int *val2); +#endif /* __IIO_RESCALE_H__ */ -- cgit v1.2.3-71-gd317 From a29c3283653b80b916c5ca5292c5d36415e38e92 Mon Sep 17 00:00:00 2001 From: Liam Beguin Date: Sat, 12 Feb 2022 21:57:32 -0500 Subject: iio: afe: rescale: add offset support This is a preparatory change required for the addition of temperature sensing front ends. Signed-off-by: Liam Beguin Reviewed-by: Peter Rosin Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220213025739.2561834-4-liambeguin@gmail.com Signed-off-by: Jonathan Cameron --- drivers/iio/afe/iio-rescale.c | 81 +++++++++++++++++++++++++++++++++++++++++ include/linux/iio/afe/rescale.h | 4 ++ 2 files changed, 85 insertions(+) (limited to 'include/linux') diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c index e67d9a9e6135..8eaf766e28cc 100644 --- a/drivers/iio/afe/iio-rescale.c +++ b/drivers/iio/afe/iio-rescale.c @@ -3,6 +3,7 @@ * IIO rescale driver * * Copyright (C) 2018 Axentia Technologies AB + * Copyright (C) 2022 Liam Beguin * * Author: Peter Rosin */ @@ -81,11 +82,46 @@ int rescale_process_scale(struct rescale *rescale, int scale_type, } } +int rescale_process_offset(struct rescale *rescale, int scale_type, + int scale, int scale2, int schan_off, + int *val, int *val2) +{ + s64 tmp, tmp2; + + switch (scale_type) { + case IIO_VAL_FRACTIONAL: + tmp = (s64)rescale->offset * scale2; + *val = div_s64(tmp, scale) + schan_off; + return IIO_VAL_INT; + case IIO_VAL_INT: + *val = div_s64(rescale->offset, scale) + schan_off; + return IIO_VAL_INT; + case IIO_VAL_FRACTIONAL_LOG2: + tmp = (s64)rescale->offset * (1 << scale2); + *val = div_s64(tmp, scale) + schan_off; + return IIO_VAL_INT; + case IIO_VAL_INT_PLUS_NANO: + tmp = (s64)rescale->offset * 1000000000LL; + tmp2 = ((s64)scale * 1000000000LL) + scale2; + *val = div64_s64(tmp, tmp2) + schan_off; + return IIO_VAL_INT; + case IIO_VAL_INT_PLUS_MICRO: + tmp = (s64)rescale->offset * 1000000LL; + tmp2 = ((s64)scale * 1000000LL) + scale2; + *val = div64_s64(tmp, tmp2) + schan_off; + return IIO_VAL_INT; + default: + return -EOPNOTSUPP; + } +} + static int rescale_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct rescale *rescale = iio_priv(indio_dev); + int scale, scale2; + int schan_off = 0; int ret; switch (mask) { @@ -112,6 +148,47 @@ static int rescale_read_raw(struct iio_dev *indio_dev, ret = iio_read_channel_scale(rescale->source, val, val2); } return rescale_process_scale(rescale, ret, val, val2); + case IIO_CHAN_INFO_OFFSET: + /* + * Processed channels are scaled 1-to-1 and source offset is + * already taken into account. 
+ * + * In other cases, real-world measurements are expressed as: + * + * schan_scale * (raw + schan_offset) + * + * Given that the rescaler parameters are applied recursively: + * + * rescaler_scale * (schan_scale * (raw + schan_offset) + + * rescaler_offset) + * + * Or, + * + * (rescaler_scale * schan_scale) * (raw + + * (schan_offset + rescaler_offset / schan_scale)) + * + * Thus, reusing the original expression, the parameters exposed + * to userspace are: + * + * scale = schan_scale * rescaler_scale + * offset = schan_offset + rescaler_offset / schan_scale + */ + if (rescale->chan_processed) { + *val = rescale->offset; + return IIO_VAL_INT; + } + + if (iio_channel_has_info(rescale->source->channel, + IIO_CHAN_INFO_OFFSET)) { + ret = iio_read_channel_offset(rescale->source, + &schan_off, NULL); + if (ret != IIO_VAL_INT) + return ret < 0 ? ret : -EOPNOTSUPP; + } + + ret = iio_read_channel_scale(rescale->source, &scale, &scale2); + return rescale_process_offset(rescale, ret, scale, scale2, + schan_off, val, val2); default: return -EINVAL; } @@ -188,6 +265,9 @@ static int rescale_configure_channel(struct device *dev, chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE); + if (rescale->offset) + chan->info_mask_separate |= BIT(IIO_CHAN_INFO_OFFSET); + /* * Using .read_avail() is fringe to begin with and makes no sense * whatsoever for processed channels, so we make sure that this cannot @@ -352,6 +432,7 @@ static int rescale_probe(struct platform_device *pdev) rescale->cfg = of_device_get_match_data(dev); rescale->numerator = 1; rescale->denominator = 1; + rescale->offset = 0; ret = rescale->cfg->props(dev, rescale); if (ret) diff --git a/include/linux/iio/afe/rescale.h b/include/linux/iio/afe/rescale.h index 8a2eb34af327..6eecb435488f 100644 --- a/include/linux/iio/afe/rescale.h +++ b/include/linux/iio/afe/rescale.h @@ -25,8 +25,12 @@ struct rescale { bool chan_processed; s32 numerator; s32 denominator; + s32 offset; }; int rescale_process_scale(struct rescale *rescale, int scale_type, int *val, int *val2); +int rescale_process_offset(struct rescale *rescale, int scale_type, + int scale, int scale2, int schan_off, + int *val, int *val2); #endif /* __IIO_RESCALE_H__ */ -- cgit v1.2.3-71-gd317 From e75d16e58467c5703821e12536c7dc438f3c425d Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Thu, 24 Feb 2022 07:12:09 +0100 Subject: hwmon: (core) Add support for pwm auto channels attribute pwm[1-*]_auto_channels_temp is documented as an official hwmon sysfs attribute, yet there is no support for it in the new with_info-API. Fix that.
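For a driver using the with_info API, exposing the new attribute is then just a matter of setting the corresponding bit in its channel description. A minimal sketch, assuming a hypothetical driver with a single pwm channel (the read/write callbacks are omitted):

static const struct hwmon_channel_info *example_chip_info[] = {
	HWMON_CHANNEL_INFO(pwm,
			   HWMON_PWM_INPUT | HWMON_PWM_ENABLE |
			   HWMON_PWM_AUTO_CHANNELS_TEMP),
	NULL
};

With that bit set, the core instantiates pwm1_auto_channels_temp and routes accesses through the driver's hwmon_ops like any other pwm attribute.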
Signed-off-by: Armin Wolf Link: https://lore.kernel.org/r/20220224061210.16452-2-W_Armin@gmx.de Signed-off-by: Guenter Roeck --- drivers/hwmon/hwmon.c | 1 + include/linux/hwmon.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 0d6c6809f26c..989e2c8496dd 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -604,6 +604,7 @@ static const char * const hwmon_pwm_attr_templates[] = { [hwmon_pwm_enable] = "pwm%d_enable", [hwmon_pwm_mode] = "pwm%d_mode", [hwmon_pwm_freq] = "pwm%d_freq", + [hwmon_pwm_auto_channels_temp] = "pwm%d_auto_channels_temp", }; static const char * const hwmon_intrusion_attr_templates[] = { diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index fad1f1df26df..eba380b76d15 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -332,12 +332,14 @@ enum hwmon_pwm_attributes { hwmon_pwm_enable, hwmon_pwm_mode, hwmon_pwm_freq, + hwmon_pwm_auto_channels_temp, }; #define HWMON_PWM_INPUT BIT(hwmon_pwm_input) #define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable) #define HWMON_PWM_MODE BIT(hwmon_pwm_mode) #define HWMON_PWM_FREQ BIT(hwmon_pwm_freq) +#define HWMON_PWM_AUTO_CHANNELS_TEMP BIT(hwmon_pwm_auto_channels_temp) enum hwmon_intrusion_attributes { hwmon_intrusion_alarm, -- cgit v1.2.3-71-gd317 From d72ce7d324786257410bec6b36e3756647dd76fd Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 26 Feb 2022 00:27:55 +0100 Subject: power: supply: ab8500: Standardize maintenance charging Maintenance charging is the phase of keeping up the charge after the battery has charged fully using CC/CV charging. This can be done in many successive phases and is usually done with a slightly lower constant voltage than CV, and a slightly lower allowed current. Add an array of maintenance charging points each with a current, voltage and safety timer, and add helper functions to use these. Migrate the AB8500 code over. This is used in several Samsung products using the AB8500 and these batteries and their complete parameters will be added later as full examples, but the default battery in the AB8500 code serves as a reasonable example so far. 
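A rough sketch of how a charging algorithm is meant to walk such an array with the new helper; stop_charging(), apply_setpoints() and start_safety_timer() are made-up stand-ins for a real driver's hooks:

static void example_maintenance_step(struct power_supply_battery_info *info,
				     int index)
{
	struct power_supply_maintenance_charge_table *mt;

	mt = power_supply_get_maintenance_charging_setting(info, index);
	if (!mt) {
		/* Past the last setting: charging stays off until the
		 * battery falls back to charge_restart_voltage_uv
		 */
		stop_charging();
		return;
	}

	apply_setpoints(mt->charge_voltage_max_uv, mt->charge_current_max_ua);
	start_safety_timer(mt->charge_safety_timer_minutes);
}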
Reviewed-by: Matti Vaittinen Signed-off-by: Linus Walleij Signed-off-by: Sebastian Reichel --- drivers/power/supply/ab8500-bm.h | 14 ------- drivers/power/supply/ab8500_bmdata.c | 27 ++++++++++---- drivers/power/supply/ab8500_chargalg.c | 41 +++++++++++++++----- drivers/power/supply/power_supply_core.c | 11 ++++++ include/linux/power_supply.h | 64 ++++++++++++++++++++++++++++++++ 5 files changed, 126 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/ab8500-bm.h b/drivers/power/supply/ab8500-bm.h index 6efd5174dbce..4d74d21cf1eb 100644 --- a/drivers/power/supply/ab8500-bm.h +++ b/drivers/power/supply/ab8500-bm.h @@ -331,24 +331,12 @@ struct ab8500_maxim_parameters { * struct ab8500_battery_type - different batteries supported * @resis_high: battery upper resistance limit * @resis_low: battery lower resistance limit - * @maint_a_cur_lvl: charger current in maintenance A state in mA - * @maint_a_vol_lvl: charger voltage in maintenance A state in mV - * @maint_a_chg_timer_h: charge time in maintenance A state - * @maint_b_cur_lvl: charger current in maintenance B state in mA - * @maint_b_vol_lvl: charger voltage in maintenance B state in mV - * @maint_b_chg_timer_h: charge time in maintenance B state * @low_high_cur_lvl: charger current in temp low/high state in mA * @low_high_vol_lvl: charger voltage in temp low/high state in mV' */ struct ab8500_battery_type { int resis_high; int resis_low; - int maint_a_cur_lvl; - int maint_a_vol_lvl; - int maint_a_chg_timer_h; - int maint_b_cur_lvl; - int maint_b_vol_lvl; - int maint_b_chg_timer_h; int low_high_cur_lvl; int low_high_vol_lvl; }; @@ -393,7 +381,6 @@ struct ab8500_bm_charger_parameters { * @usb_safety_tmr_h safety timer for usb charger * @bkup_bat_v voltage which we charge the backup battery with * @bkup_bat_i current which we charge the backup battery with - * @no_maintenance indicates that maintenance charging is disabled * @capacity_scaling indicates whether capacity scaling is to be used * @chg_unknown_bat flag to enable charging of unknown batteries * @enable_overshoot flag to enable VBAT overshoot control @@ -417,7 +404,6 @@ struct ab8500_bm_data { int usb_safety_tmr_h; int bkup_bat_v; int bkup_bat_i; - bool no_maintenance; bool capacity_scaling; bool chg_unknown_bat; bool enable_overshoot; diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c index d8fc72be0f0e..66a454942c7c 100644 --- a/drivers/power/supply/ab8500_bmdata.c +++ b/drivers/power/supply/ab8500_bmdata.c @@ -58,16 +58,25 @@ static struct power_supply_resistance_temp_table temp_to_batres_tbl_thermistor[] { .temp = -20, .resistance = 198 /* 595 mOhm */ }, }; +static struct power_supply_maintenance_charge_table ab8500_maint_charg_table[] = { + { + /* Maintenance charging phase A, 60 hours */ + .charge_current_max_ua = 400000, + .charge_voltage_max_uv = 4050000, + .charge_safety_timer_minutes = 60*60, + }, + { + /* Maintenance charging phase B, 200 hours */ + .charge_current_max_ua = 400000, + .charge_voltage_max_uv = 4000000, + .charge_safety_timer_minutes = 200*60, + } +}; + /* Default battery type for reference designs is the unknown type */ static struct ab8500_battery_type bat_type_thermistor_unknown = { .resis_high = 0, .resis_low = 0, - .maint_a_cur_lvl = 400, - .maint_a_vol_lvl = 4050, - .maint_a_chg_timer_h = 60, - .maint_b_cur_lvl = 400, - .maint_b_vol_lvl = 4000, - .maint_b_chg_timer_h = 200, .low_high_cur_lvl = 300, .low_high_vol_lvl = 4000, }; @@ -124,7 +133,6 @@ struct ab8500_bm_data 
ab8500_bm_data = { .usb_safety_tmr_h = 4, .bkup_bat_v = BUP_VCH_SEL_2P6V, .bkup_bat_i = BUP_ICH_SEL_150UA, - .no_maintenance = false, .capacity_scaling = false, .chg_unknown_bat = false, .enable_overshoot = false, @@ -179,6 +187,11 @@ int ab8500_bm_of_probe(struct power_supply *psy, /* Charging stops when we drop below this current */ bi->charge_term_current_ua = 200000; + if (!bi->maintenance_charge || !bi->maintenance_charge_size) { + bi->maintenance_charge = ab8500_maint_charg_table; + bi->maintenance_charge_size = ARRAY_SIZE(ab8500_maint_charg_table); + } + /* * Internal resistance and factory resistance are tightly coupled * so both MUST be defined or we fall back to defaults. diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c index b5a3096e78a1..6054996b6260 100644 --- a/drivers/power/supply/ab8500_chargalg.c +++ b/drivers/power/supply/ab8500_chargalg.c @@ -430,7 +430,7 @@ static void ab8500_chargalg_stop_safety_timer(struct ab8500_chargalg *di) /** * ab8500_chargalg_start_maintenance_timer() - Start charging maintenance timer * @di: pointer to the ab8500_chargalg structure - * @duration: duration of ther maintenance timer in hours + * @duration: duration of the maintenance timer in minutes * * The maintenance timer is used to maintain the charge in the battery once * the battery is considered full. These timers are chosen to match the @@ -439,9 +439,10 @@ static void ab8500_chargalg_stop_safety_timer(struct ab8500_chargalg *di) static void ab8500_chargalg_start_maintenance_timer(struct ab8500_chargalg *di, int duration) { + /* Set a timer in minutes with a 30 second range */ hrtimer_set_expires_range(&di->maintenance_timer, - ktime_set(duration * ONE_HOUR_IN_SECONDS, 0), - ktime_set(FIVE_MINUTES_IN_SECONDS, 0)); + ktime_set(duration * 60, 0), + ktime_set(30, 0)); di->events.maintenance_timer_expired = false; hrtimer_start_expires(&di->maintenance_timer, HRTIMER_MODE_REL); } @@ -1223,6 +1224,7 @@ static void ab8500_chargalg_external_power_changed(struct power_supply *psy) static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di) { struct power_supply_battery_info *bi = di->bm->bi; + struct power_supply_maintenance_charge_table *mt; int charger_status; int ret; @@ -1433,7 +1435,12 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di) handle_maxim_chg_curr(di); if (di->charge_status == POWER_SUPPLY_STATUS_FULL && di->maintenance_chg) { - if (di->bm->no_maintenance) + /* + * The battery is fully charged; check if we support + * maintenance charging, else go back to waiting for + * the recharge voltage limit.
+ */ + if (!power_supply_supports_maintenance_charging(bi)) ab8500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE_INIT); else @@ -1454,12 +1461,19 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di) break; case STATE_MAINTENANCE_A_INIT: + mt = power_supply_get_maintenance_charging_setting(bi, 0); + if (!mt) { + /* No maintenance A state, go back to normal */ + ab8500_chargalg_state_to(di, STATE_NORMAL_INIT); + power_supply_changed(di->chargalg_psy); + break; + } ab8500_chargalg_stop_safety_timer(di); ab8500_chargalg_start_maintenance_timer(di, - di->bm->bat_type->maint_a_chg_timer_h); + mt->charge_safety_timer_minutes); ab8500_chargalg_start_charging(di, - di->bm->bat_type->maint_a_vol_lvl, - di->bm->bat_type->maint_a_cur_lvl); + mt->charge_voltage_max_uv, + mt->charge_current_max_ua); ab8500_chargalg_state_to(di, STATE_MAINTENANCE_A); power_supply_changed(di->chargalg_psy); fallthrough; @@ -1472,11 +1486,18 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di) break; case STATE_MAINTENANCE_B_INIT: + mt = power_supply_get_maintenance_charging_setting(bi, 1); + if (!mt) { + /* No maintenance B state, go back to normal */ + ab8500_chargalg_state_to(di, STATE_NORMAL_INIT); + power_supply_changed(di->chargalg_psy); + break; + } ab8500_chargalg_start_maintenance_timer(di, - di->bm->bat_type->maint_b_chg_timer_h); + mt->charge_safety_timer_minutes); ab8500_chargalg_start_charging(di, - di->bm->bat_type->maint_b_vol_lvl, - di->bm->bat_type->maint_b_cur_lvl); + mt->charge_voltage_max_uv, + mt->charge_current_max_ua); ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B); power_supply_changed(di->chargalg_psy); fallthrough; diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 8dbd1197cc62..accbbd36bfe7 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -595,6 +595,7 @@ int power_supply_get_battery_info(struct power_supply *psy, info->precharge_voltage_max_uv = -EINVAL; info->charge_restart_voltage_uv = -EINVAL; info->overvoltage_limit_uv = -EINVAL; + info->maintenance_charge = NULL; info->temp_ambient_alert_min = INT_MIN; info->temp_ambient_alert_max = INT_MAX; info->temp_alert_min = INT_MIN; @@ -844,6 +845,16 @@ int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *t } EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple); +struct power_supply_maintenance_charge_table * +power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, + int index) +{ + if (index >= info->maintenance_charge_size) + return NULL; + return &info->maintenance_charge[index]; +} +EXPORT_SYMBOL_GPL(power_supply_get_maintenance_charging_setting); + /** * power_supply_ocv2cap_simple() - find the battery capacity * @table: Pointer to battery OCV lookup table diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index c135196aa9d1..8ced6550caa7 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -349,6 +349,52 @@ struct power_supply_resistance_temp_table { int resistance; /* internal resistance percent */ }; +/** + * struct power_supply_maintenance_charge_table - setting for maintenance charging + * @charge_current_max_ua: maintenance charging current that is used to keep + * the charge of the battery full as current is consumed after full charging. + * The corresponding charge_voltage_max_uv is used as a safeguard: when we + * reach this voltage the maintenance charging current is turned off.
It is + * turned back on if we fall below this voltage. + * @charge_voltage_max_uv: maintenance charging voltage that is usually a bit + * lower than the constant_charge_voltage_max_uv. We can apply this setting's + * charge_current_max_ua until we get back up to this voltage. + * @charge_safety_timer_minutes: maintenance charging safety timer, with an expiry + * time in minutes. We will only use maintenance charging in this setting + * for a certain amount of time, then we will first move to the next + * maintenance charge current and voltage pair in the respective array and wait + * for the next safety timer timeout, or, if we reached the last maintenance + * charging setting, disable charging until we reach + * charge_restart_voltage_uv and restart ordinary CC/CV charging from there. + * These timers should be chosen to align with the typical discharge curve + * for the battery. + * + * When the main CC/CV charging is complete the battery can optionally be + * maintenance charged at the voltages from this table: a table of settings is + * traversed using a slightly lower current and voltage than what is used for + * CC/CV charging. The maintenance charging will for safety reasons not go on + * indefinitely: we lower the current and voltage with successive maintenance + * settings, then disable charging completely after we reach the last one, + * and after that we do not restart charging until we reach + * charge_restart_voltage_uv (see struct power_supply_battery_info) and restart + * ordinary CC/CV charging from there. + * + * As an example, a Samsung EB425161LA Lithium-Ion battery is CC/CV charged + * at 900mA to 4340mV, then maintenance charged at 600mA and 4150mV for + * 60 hours, then maintenance charged at 600mA and 4100mV for 200 hours. + * After this the charge cycle is restarted waiting for + * charge_restart_voltage_uv. + * + * For most mobile electronics this type of maintenance charging is enough for + * the user to disconnect the device and make use of it before both maintenance + * charging cycles are complete. + */ +struct power_supply_maintenance_charge_table { + int charge_current_max_ua; + int charge_voltage_max_uv; + int charge_safety_timer_minutes; +}; + #define POWER_SUPPLY_OCV_TEMP_MAX 20 /** @@ -394,6 +440,10 @@ struct power_supply_resistance_temp_table { * @constant_charge_voltage_max_uv: voltage in microvolts signifying the end of * the CC (constant current) charging phase and the beginning of the CV * (constant voltage) charging phase. + * @maintenance_charge: an array of maintenance charging settings to be used + * after the main CC/CV charging phase is complete. + * @maintenance_charge_size: the number of maintenance charging settings in + * maintenance_charge. * @factory_internal_resistance_uohm: the internal resistance of the battery * at fabrication time, expressed in microohms.
This resistance will vary * depending on the lifetime and charge of the battery, so this is just a @@ -543,6 +593,8 @@ struct power_supply_battery_info { int overvoltage_limit_uv; int constant_charge_current_max_ua; int constant_charge_voltage_max_uv; + struct power_supply_maintenance_charge_table *maintenance_charge; + int maintenance_charge_size; int factory_internal_resistance_uohm; int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX]; int temp_ambient_alert_min; @@ -596,6 +648,8 @@ extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, extern int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table, int table_len, int temp); +extern struct power_supply_maintenance_charge_table * +power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index); extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); int power_supply_get_property_from_supplier(struct power_supply *psy, @@ -603,6 +657,16 @@ int power_supply_get_property_from_supplier(struct power_supply *psy, union power_supply_propval *val); extern int power_supply_set_battery_charged(struct power_supply *psy); +static inline bool +power_supply_supports_maintenance_charging(struct power_supply_battery_info *info) +{ + struct power_supply_maintenance_charge_table *mt; + + mt = power_supply_get_maintenance_charging_setting(info, 0); + + return (mt != NULL); +} + #ifdef CONFIG_POWER_SUPPLY extern int power_supply_is_system_supplied(void); #else -- cgit v1.2.3-71-gd317 From 0e8b903b522b5a3cb473035cea085d396dd7150a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 26 Feb 2022 00:27:56 +0100 Subject: power: supply: ab8500: Standardize alert mode charging The AB8500 code uses a special current and voltage setting when the battery is in "alert mode", i.e. when it is starting to go outside normal operating conditions and is becoming too cold or too hot. This makes sense as a way for the charging algorithm to deal with hostile environments. Add the needed members to the struct power_supply_battery_info, and switch the AB8500 charging code over to using this.
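A minimal sketch of the intended use; set_charging() is a placeholder for however a driver programs its charger, and the temperature zones follow temp_min/temp_alert_min/temp_alert_max/temp_max as in the AB8500 code below:

static void example_pick_setpoints(struct power_supply_battery_info *bi,
				   int temp)
{
	if (temp <= bi->temp_alert_min)
		/* Low alert zone, between temp_min and temp_alert_min */
		set_charging(bi->alert_low_temp_charge_voltage_uv,
			     bi->alert_low_temp_charge_current_ua);
	else if (temp >= bi->temp_alert_max)
		/* High alert zone, between temp_alert_max and temp_max */
		set_charging(bi->alert_high_temp_charge_voltage_uv,
			     bi->alert_high_temp_charge_current_ua);
	else
		/* Normal zone: ordinary CC/CV setpoints */
		set_charging(bi->constant_charge_voltage_max_uv,
			     bi->constant_charge_current_max_ua);
}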
Reviewed-by: Matti Vaittineen Signed-off-by: Linus Walleij Signed-off-by: Sebastian Reichel --- drivers/power/supply/ab8500-bm.h | 4 --- drivers/power/supply/ab8500_bmdata.c | 15 ++++++++-- drivers/power/supply/ab8500_chargalg.c | 48 +++++++++++++++++++++----------- drivers/power/supply/power_supply_core.c | 4 +++ include/linux/power_supply.h | 17 +++++++++++ 5 files changed, 66 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/ab8500-bm.h b/drivers/power/supply/ab8500-bm.h index 4d74d21cf1eb..91ef9d4a5222 100644 --- a/drivers/power/supply/ab8500-bm.h +++ b/drivers/power/supply/ab8500-bm.h @@ -331,14 +331,10 @@ struct ab8500_maxim_parameters { * struct ab8500_battery_type - different batteries supported * @resis_high: battery upper resistance limit * @resis_low: battery lower resistance limit - * @low_high_cur_lvl: charger current in temp low/high state in mA - * @low_high_vol_lvl: charger voltage in temp low/high state in mV' */ struct ab8500_battery_type { int resis_high; int resis_low; - int low_high_cur_lvl; - int low_high_vol_lvl; }; /** diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c index 66a454942c7c..bf0b74773eee 100644 --- a/drivers/power/supply/ab8500_bmdata.c +++ b/drivers/power/supply/ab8500_bmdata.c @@ -77,8 +77,6 @@ static struct power_supply_maintenance_charge_table ab8500_maint_charg_table[] = static struct ab8500_battery_type bat_type_thermistor_unknown = { .resis_high = 0, .resis_low = 0, - .low_high_cur_lvl = 300, - .low_high_vol_lvl = 4000, }; static const struct ab8500_bm_capacity_levels cap_levels = { @@ -192,6 +190,19 @@ int ab8500_bm_of_probe(struct power_supply *psy, bi->maintenance_charge_size = ARRAY_SIZE(ab8500_maint_charg_table); } + if (bi->alert_low_temp_charge_current_ua < 0 || + bi->alert_low_temp_charge_voltage_uv < 0) + { + bi->alert_low_temp_charge_current_ua = 300000; + bi->alert_low_temp_charge_voltage_uv = 4000000; + } + if (bi->alert_high_temp_charge_current_ua < 0 || + bi->alert_high_temp_charge_voltage_uv < 0) + { + bi->alert_high_temp_charge_current_ua = 300000; + bi->alert_high_temp_charge_voltage_uv = 4000000; + } + /* * Internal resistance and factory resistance are tightly coupled * so both MUST be defined or we fall back to defaults. diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c index 6054996b6260..c9c7f7028af6 100644 --- a/drivers/power/supply/ab8500_chargalg.c +++ b/drivers/power/supply/ab8500_chargalg.c @@ -149,7 +149,8 @@ struct ab8500_chargalg_events { bool batt_ovv; bool batt_rem; bool btemp_underover; - bool btemp_lowhigh; + bool btemp_low; + bool btemp_high; bool main_thermal_prot; bool usb_thermal_prot; bool main_ovv; @@ -684,26 +685,31 @@ static void ab8500_chargalg_check_temp(struct ab8500_chargalg *di) di->batt_data.temp < (bi->temp_alert_max - di->t_hyst_norm)) { /* Temp OK! */ di->events.btemp_underover = false; - di->events.btemp_lowhigh = false; + di->events.btemp_low = false; + di->events.btemp_high = false; di->t_hyst_norm = 0; di->t_hyst_lowhigh = 0; } else { - if (((di->batt_data.temp >= bi->temp_alert_max) && - (di->batt_data.temp < - (bi->temp_max - di->t_hyst_lowhigh))) || - ((di->batt_data.temp > - (bi->temp_min + di->t_hyst_lowhigh)) && - (di->batt_data.temp <= bi->temp_alert_min))) { - /* TEMP minor!!!!! 
*/ + if ((di->batt_data.temp >= bi->temp_alert_max) && + (di->batt_data.temp < (bi->temp_max - di->t_hyst_lowhigh))) { + /* Alert zone for high temperature */ di->events.btemp_underover = false; - di->events.btemp_lowhigh = true; + di->events.btemp_high = true; + di->t_hyst_norm = di->bm->temp_hysteresis; + di->t_hyst_lowhigh = 0; + } else if ((di->batt_data.temp > (bi->temp_min + di->t_hyst_lowhigh)) && + (di->batt_data.temp <= bi->temp_alert_min)) { + /* Alert zone for low temperature */ + di->events.btemp_underover = false; + di->events.btemp_low = true; di->t_hyst_norm = di->bm->temp_hysteresis; di->t_hyst_lowhigh = 0; } else if (di->batt_data.temp <= bi->temp_min || di->batt_data.temp >= bi->temp_max) { /* TEMP major!!!!! */ di->events.btemp_underover = true; - di->events.btemp_lowhigh = false; + di->events.btemp_low = false; + di->events.btemp_high = false; di->t_hyst_norm = 0; di->t_hyst_lowhigh = di->bm->temp_hysteresis; } else { @@ -1313,7 +1319,7 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di) ab8500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT); } /* Battery temp high/low */ - else if (di->events.btemp_lowhigh) { + else if (di->events.btemp_low || di->events.btemp_high) { if (di->charge_state != STATE_TEMP_LOWHIGH) ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT); } @@ -1510,9 +1516,19 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di) break; case STATE_TEMP_LOWHIGH_INIT: - ab8500_chargalg_start_charging(di, - di->bm->bat_type->low_high_vol_lvl, - di->bm->bat_type->low_high_cur_lvl); + if (di->events.btemp_low) { + ab8500_chargalg_start_charging(di, + bi->alert_low_temp_charge_voltage_uv, + bi->alert_low_temp_charge_current_ua); + } else if (di->events.btemp_high) { + ab8500_chargalg_start_charging(di, + bi->alert_high_temp_charge_voltage_uv, + bi->alert_high_temp_charge_current_ua); + } else { + dev_err(di->dev, "neither low nor high temp event occurred\n"); + ab8500_chargalg_state_to(di, STATE_NORMAL_INIT); + break; + } ab8500_chargalg_stop_maintenance_timer(di); di->charge_status = POWER_SUPPLY_STATUS_CHARGING; ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH); @@ -1520,7 +1536,7 @@ fallthrough; case STATE_TEMP_LOWHIGH: - if (!di->events.btemp_lowhigh) + if (!di->events.btemp_low && !di->events.btemp_high) ab8500_chargalg_state_to(di, STATE_NORMAL_INIT); break; diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index accbbd36bfe7..e3d6d3ff492a 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -596,6 +596,10 @@ int power_supply_get_battery_info(struct power_supply *psy, info->charge_restart_voltage_uv = -EINVAL; info->overvoltage_limit_uv = -EINVAL; info->maintenance_charge = NULL; + info->alert_low_temp_charge_current_ua = -EINVAL; + info->alert_low_temp_charge_voltage_uv = -EINVAL; + info->alert_high_temp_charge_current_ua = -EINVAL; + info->alert_high_temp_charge_voltage_uv = -EINVAL; info->temp_ambient_alert_min = INT_MIN; info->temp_ambient_alert_max = INT_MAX; info->temp_alert_min = INT_MIN; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 8ced6550caa7..f8601598d3d3 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -444,6 +444,19 @@ struct power_supply_maintenance_charge_table { * after the main CC/CV charging phase is complete.
* @maintenance_charge_size: the number of maintenance charging settings in + * maintenance_charge. + * @alert_low_temp_charge_current_ua: The charging current to use if the battery + * enters low alert temperature, i.e. if the internal temperature is between + * temp_alert_min and temp_min. No matter the charging phase, this + * and alert_low_temp_charge_voltage_uv will be applied. + * @alert_low_temp_charge_voltage_uv: Same as alert_low_temp_charge_current_ua, + * but for the charging voltage. + * @alert_high_temp_charge_current_ua: The charging current to use if the + * battery enters high alert temperature, i.e. if the internal temperature is + * between temp_alert_max and temp_max. No matter the charging phase, this + * and alert_high_temp_charge_voltage_uv will be applied, usually lowering + * the charging current as an evasive maneuver. + * @alert_high_temp_charge_voltage_uv: Same as + * alert_high_temp_charge_current_ua, but for the charging voltage. * @factory_internal_resistance_uohm: the internal resistance of the battery * at fabrication time, expressed in microohms. This resistance will vary * depending on the lifetime and charge of the battery, so this is just a @@ -595,6 +608,10 @@ struct power_supply_battery_info { int constant_charge_voltage_max_uv; struct power_supply_maintenance_charge_table *maintenance_charge; int maintenance_charge_size; + int alert_low_temp_charge_current_ua; + int alert_low_temp_charge_voltage_uv; + int alert_high_temp_charge_current_ua; + int alert_high_temp_charge_voltage_uv; int factory_internal_resistance_uohm; int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX]; int temp_ambient_alert_min; -- cgit v1.2.3-71-gd317 From 1f918e0fe43ec41d906e2cf96b80b15451fed7ba Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 26 Feb 2022 00:27:57 +0100 Subject: power: supply: ab8500: Standardize BTI resistance The Battery Type Indicator (BTI) resistor is a resistor mounted between a special terminal on the battery and ground. By sending a fixed current (such as 7mA) through this resistor and measuring the voltage over it, the resistance can be determined, and this verifies the battery type. Typical side view of the battery: o o o GND BTI +3.8V Typical example of the electrical layout: +3.8 V BTI | | | + | _______ [ ] 7kOhm ___ | | | | | GND GND By verifying this resistance before attempting to charge the battery we add an additional level of security. In some systems this is used for plug-and-play of batteries with different capacities. In other cases, this is merely used to verify that the right type of battery is connected, if several batteries have the same physical shape and can be plugged into the same slot. Sometimes this is just a surplus security mechanism. Nokia and Samsung among many other vendors are known to use these BTI resistors. Add the BTI properties to struct power_supply_battery_info and switch the AB8500 charger code over to using it.
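A sketch of the driver-side check this enables; measure_bti_ohm() is a placeholder for the platform's measurement path (the AB8500 senses it through the BATCTRL pin):

static bool example_battery_type_ok(struct power_supply_battery_info *info)
{
	int res = measure_bti_ohm();	/* hypothetical ADC readout */

	return power_supply_battery_bti_in_range(info, res);
}

With the AB8500 defaults below (7000 Ohm nominal, 20% tolerance), any measured resistance between 5600 and 8400 Ohm identifies the battery as a known type.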
Signed-off-by: Linus Walleij Signed-off-by: Sebastian Reichel --- drivers/power/supply/ab8500-bm.h | 12 ------------ drivers/power/supply/ab8500_bmdata.c | 14 +++++++------- drivers/power/supply/ab8500_btemp.c | 14 ++++++-------- drivers/power/supply/ab8500_fg.c | 4 ---- drivers/power/supply/power_supply_core.c | 26 +++++++++++++++++++++++++- include/linux/power_supply.h | 13 +++++++++++++ 6 files changed, 51 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/ab8500-bm.h b/drivers/power/supply/ab8500-bm.h index 91ef9d4a5222..180a016b3662 100644 --- a/drivers/power/supply/ab8500-bm.h +++ b/drivers/power/supply/ab8500-bm.h @@ -327,16 +327,6 @@ struct ab8500_maxim_parameters { int charger_curr_step_ua; }; -/** - * struct ab8500_battery_type - different batteries supported - * @resis_high: battery upper resistance limit - * @resis_low: battery lower resistance limit - */ -struct ab8500_battery_type { - int resis_high; - int resis_low; -}; - /** * struct ab8500_bm_capacity_levels - ab8500 capacity level data * @critical: critical capacity level in percent @@ -387,7 +377,6 @@ struct ab8500_bm_charger_parameters { * @temp_hysteresis temperature hysteresis * @maxi maximization parameters * @cap_levels capacity in percent for the different capacity levels - * @bat_type table of supported battery types * @chg_params charger parameters * @fg_params fuel gauge parameters */ @@ -410,7 +399,6 @@ struct ab8500_bm_data { int temp_hysteresis; const struct ab8500_maxim_parameters *maxi; const struct ab8500_bm_capacity_levels *cap_levels; - struct ab8500_battery_type *bat_type; const struct ab8500_bm_charger_parameters *chg_params; const struct ab8500_fg_parameters *fg_params; }; diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c index bf0b74773eee..3e6ea22372b2 100644 --- a/drivers/power/supply/ab8500_bmdata.c +++ b/drivers/power/supply/ab8500_bmdata.c @@ -73,12 +73,6 @@ static struct power_supply_maintenance_charge_table ab8500_maint_charg_table[] = } }; -/* Default battery type for reference designs is the unknown type */ -static struct ab8500_battery_type bat_type_thermistor_unknown = { - .resis_high = 0, - .resis_low = 0, -}; - static const struct ab8500_bm_capacity_levels cap_levels = { .critical = 2, .low = 10, @@ -136,7 +130,6 @@ struct ab8500_bm_data ab8500_bm_data = { .enable_overshoot = false, .fg_res = 100, .cap_levels = &cap_levels, - .bat_type = &bat_type_thermistor_unknown, .interval_charging = 5, .interval_not_charging = 120, .maxi = &ab8500_maxi_params, @@ -214,6 +207,13 @@ int ab8500_bm_of_probe(struct power_supply *psy, bi->resist_table_size = ARRAY_SIZE(temp_to_batres_tbl_thermistor); } + /* The default battery is emulated by a resistor at 7K */ + if (bi->bti_resistance_ohm < 0 || + bi->bti_resistance_tolerance < 0) { + bi->bti_resistance_ohm = 7000; + bi->bti_resistance_tolerance = 20; + } + if (!bi->ocv_table[0]) { /* Default capacity table at say 25 degrees Celsius */ bi->ocv_temp[0] = 25; diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c index 2a6fc151210c..b7e842dff567 100644 --- a/drivers/power/supply/ab8500_btemp.c +++ b/drivers/power/supply/ab8500_btemp.c @@ -237,8 +237,8 @@ static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di) */ static int ab8500_btemp_id(struct ab8500_btemp *di) { + struct power_supply_battery_info *bi = di->bm->bi; int res; - u8 i; di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA; @@ -248,13 +248,11 @@ static int ab8500_btemp_id(struct 
ab8500_btemp *di) return -ENXIO; } - if ((res <= di->bm->bat_type->resis_high) && - (res >= di->bm->bat_type->resis_low)) { - dev_info(di->dev, "Battery detected on BATTEMP" " low %d < res %d < high: %d" " index: %d\n", - di->bm->bat_type->resis_low, res, - di->bm->bat_type->resis_high, i); + if (power_supply_battery_bti_in_range(bi, res)) { + dev_info(di->dev, "Battery detected on BATCTRL (pin C3)" " resistance %d Ohm = %d Ohm +/- %d%%\n", + res, bi->bti_resistance_ohm, + bi->bti_resistance_tolerance); } else { dev_warn(di->dev, "Battery identified as unknown" ", resistance %d Ohm\n", res); diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 0227e800c58d..f2ff3103e0d0 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2241,10 +2241,6 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data) if (!di->flags.batt_id_received && (bi && (bi->technology != POWER_SUPPLY_TECHNOLOGY_UNKNOWN))) { - const struct ab8500_battery_type *b; - - b = di->bm->bat_type; - di->flags.batt_id_received = true; di->bat_cap.max_mah_design = diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index e3d6d3ff492a..3d5047d3fe99 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -607,7 +607,9 @@ int power_supply_get_battery_info(struct power_supply *psy, info->temp_min = INT_MIN; info->temp_max = INT_MAX; info->factory_internal_resistance_uohm = -EINVAL; - info->resist_table = NULL; + info->resist_table = NULL; + info->bti_resistance_ohm = -EINVAL; + info->bti_resistance_tolerance = -EINVAL; for (index = 0; index < POWER_SUPPLY_OCV_TEMP_MAX; index++) { info->ocv_table[index] = NULL; @@ -938,6 +940,28 @@ int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, } EXPORT_SYMBOL_GPL(power_supply_batinfo_ocv2cap); +bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info, + int resistance) +{ + int low, high; + + /* Nothing like this can be checked */ + if (info->bti_resistance_ohm <= 0) + return false; + + /* This will be extremely strict and unlikely to work */ + if (info->bti_resistance_tolerance <= 0) + return (info->bti_resistance_ohm == resistance); + + low = info->bti_resistance_ohm - + (info->bti_resistance_ohm * info->bti_resistance_tolerance) / 100; + high = info->bti_resistance_ohm + + (info->bti_resistance_ohm * info->bti_resistance_tolerance) / 100; + + return ((resistance >= low) && (resistance <= high)); +} +EXPORT_SYMBOL_GPL(power_supply_battery_bti_in_range); + int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index f8601598d3d3..7fdc03cf2285 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -498,6 +498,14 @@ struct power_supply_maintenance_charge_table { * by temperature: highest temperature with lowest resistance first, lowest * temperature with highest resistance last. * @resist_table_size: the number of items in the resist_table. + * @bti_resistance_ohm: The Battery Type Indicator (BTI) nominal resistance + * in ohms for this battery, if an identification resistor is mounted + * between a third battery terminal and ground. This scheme is used by a lot + * of mobile device batteries.
+ * @bti_resistance_tolerance: The tolerance in percent of the BTI resistance, + * for example 10 for +/- 10%. If bti_resistance_ohm is set to 7000 and the + * tolerance is 10%, we will detect a proper battery if the BTI resistance + * is between 6300 and 7700 Ohm. * * This is the recommended struct to manage static battery parameters, * populated by power_supply_get_battery_info(). Most platform drivers should @@ -624,6 +632,8 @@ struct power_supply_battery_info { int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; struct power_supply_resistance_temp_table *resist_table; int resist_table_size; + int bti_resistance_ohm; + int bti_resistance_tolerance; }; extern struct atomic_notifier_head power_supply_notifier; @@ -667,6 +677,8 @@ power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table int table_len, int temp); extern struct power_supply_maintenance_charge_table * power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index); +extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info, + int resistance); extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); int power_supply_get_property_from_supplier(struct power_supply *psy, @@ -684,6 +696,7 @@ power_supply_supports_maintenance_charging(struct power_supply_battery_info *inf return (mt != NULL); } + #ifdef CONFIG_POWER_SUPPLY extern int power_supply_is_system_supplied(void); #else -- cgit v1.2.3-71-gd317 From e9e7d165b4b0413c5b7db74515e4981a226b78a0 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 26 Feb 2022 00:27:58 +0100 Subject: power: supply: Support VBAT-to-Ri lookup tables In Samsung devices, the method used to compensate for temperature, age, load etc. is by way of VBAT-to-Ri tables, which correlate the battery voltage under load (VBAT) to an internal resistance (Ri). Using this Ri and a measurement of the current out of the battery (IBAT) the open circuit voltage (OCV) can be calculated as: OCV = VBAT - (Ri * IBAT) The details are described in comments to struct power_supply_battery_info in the commit. Since not all batteries supply this VBAT-to-Ri data, the temperature-to-Ri lookup table can also be used as a fallback. Add two helper functions to check if we have the tables needed for using power_supply_vbat2ri() or power_supply_temp2resist_simple() respectively, so capacity estimation code can choose which one to employ.
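A sketch of the OCV compensation this enables; the fallback branch and the sign convention (ibat_ua positive on discharge) are assumptions for illustration:

static int example_ocv_uv(struct power_supply_battery_info *info,
			  int vbat_uv, int ibat_ua, bool charging)
{
	int ri_uohm;

	if (power_supply_supports_vbat2ri(info))
		ri_uohm = power_supply_vbat2ri(info, vbat_uv, charging);
	else
		/* e.g. fall back to the factory figure, possibly scaled
		 * by temperature via power_supply_temp2resist_simple()
		 */
		ri_uohm = info->factory_internal_resistance_uohm;

	/*
	 * OCV = VBAT - (Ri * IBAT); microohm * microamp yields
	 * picovolts, so divide by 10^6 to get back to microvolts.
	 */
	return vbat_uv - (int)div_s64((s64)ri_uohm * ibat_ua, 1000000);
}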
Signed-off-by: Linus Walleij Signed-off-by: Sebastian Reichel --- drivers/power/supply/power_supply_core.c | 67 +++++++++++++++++- include/linux/power_supply.h | 113 ++++++++++++++++++++++++++++++- 2 files changed, 177 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 3d5047d3fe99..fb0b3870566e 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -814,7 +814,7 @@ EXPORT_SYMBOL_GPL(power_supply_put_battery_info); /** * power_supply_temp2resist_simple() - find the battery internal resistance - * percent + * percent from temperature * @table: Pointer to battery resistance temperature table * @table_len: The table length * @temp: Current temperature @@ -851,6 +851,71 @@ int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *t } EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple); +/** + * power_supply_vbat2ri() - find the battery internal resistance + * from the battery voltage + * @info: The battery information container + * @table: Pointer to battery resistance temperature table + * @vbat_uv: The battery voltage in microvolt + * @charging: If we are charging (true) or not (false) + * + * This helper function is used to look up battery internal resistance + * according to current battery voltage. Depending on whether the battery + * is currently charging or not, different resistance will be returned. + * + * Returns the internal resistance in microohm or negative error code. + */ +int power_supply_vbat2ri(struct power_supply_battery_info *info, + int vbat_uv, bool charging) +{ + struct power_supply_vbat_ri_table *vbat2ri; + int table_len; + int i, high, low; + + /* + * If we are charging, and the battery supplies a separate table + * for this state, we use that in order to compensate for the + * charging voltage. Otherwise we use the main table. + */ + if (charging && info->vbat2ri_charging) { + vbat2ri = info->vbat2ri_charging; + table_len = info->vbat2ri_charging_size; + } else { + vbat2ri = info->vbat2ri_discharging; + table_len = info->vbat2ri_discharging_size; + } + + /* + * If no tables are specified, or if we are above the highest voltage in + * the voltage table, just return the factory specified internal resistance. 
+ */ + if (!vbat2ri || (table_len <= 0) || (vbat_uv > vbat2ri[0].vbat_uv)) { + if (charging && (info->factory_internal_resistance_charging_uohm > 0)) + return info->factory_internal_resistance_charging_uohm; + else + return info->factory_internal_resistance_uohm; + } + + /* Break loop at table_len - 1 because that is the highest index */ + for (i = 0; i < table_len - 1; i++) + if (vbat_uv > vbat2ri[i].vbat_uv) + break; + + /* The library function will deal with high == low */ + if ((i == 0) || (i == (table_len - 1))) + high = i; + else + high = i - 1; + low = i; + + return fixp_linear_interpolate(vbat2ri[low].vbat_uv, + vbat2ri[low].ri_uohm, + vbat2ri[high].vbat_uv, + vbat2ri[high].ri_uohm, + vbat_uv); +} +EXPORT_SYMBOL_GPL(power_supply_vbat2ri); + struct power_supply_maintenance_charge_table * power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index) diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 7fdc03cf2285..cb380c1d9459 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -349,6 +349,11 @@ struct power_supply_resistance_temp_table { int resistance; /* internal resistance percent */ }; +struct power_supply_vbat_ri_table { + int vbat_uv; /* Battery voltage in microvolt */ + int ri_uohm; /* Internal resistance in microohm */ +}; + /** * struct power_supply_maintenance_charge_table - setting for maintenace charging * @charge_current_max_ua: maintenance charging current that is used to keep @@ -460,7 +465,14 @@ struct power_supply_maintenance_charge_table { * @factory_internal_resistance_uohm: the internal resistance of the battery * at fabrication time, expressed in microohms. This resistance will vary * depending on the lifetime and charge of the battery, so this is just a - * nominal ballpark figure. + * nominal ballpark figure. This internal resistance is given for the state + * when the battery is discharging. + * @factory_internal_resistance_charging_uohm: the internal resistance of the + * battery at fabrication time while charging, expressed in microohms. + * The charging process will affect the internal resistance of the battery + * so this value provides a better resistance under these circumstances. + * This resistance will vary depending on the lifetime and charge of the + * battery, so this is just a nominal ballpark figure. * @ocv_temp: array indicating the open circuit voltage (OCV) capacity * temperature indices. This is an array of temperatures in degrees Celsius * indicating which capacity table to use for a certain temperature, since @@ -498,6 +510,21 @@ struct power_supply_maintenance_charge_table { * by temperature: highest temperature with lowest resistance first, lowest * temperature with highest resistance last. * @resist_table_size: the number of items in the resist_table. + * @vbat2ri_discharging: this is a table that correlates Battery voltage (VBAT) + * to internal resistance (Ri). The resistance is given in microohm for the + * corresponding voltage in microvolts. The internal resistance is used to + * determine the open circuit voltage so that we can determine the capacity + * of the battery. These voltages to resistance tables apply when the battery + * is discharging. The table must be ordered descending by voltage: highest + * voltage first. + * @vbat2ri_discharging_size: the number of items in the vbat2ri_discharging + * table. + * @vbat2ri_charging: same function as vbat2ri_discharging but for the state + * when the battery is charging. 
Being under charge changes the battery's + * internal resistance characteristics so a separate table is needed. + * The table must be ordered descending by voltage: highest voltage first. + * @vbat2ri_charging_size: the number of items in the vbat2ri_charging + * table. * @bti_resistance_ohm: The Battery Type Indicator (BTI) nominal resistance * in ohms for this battery, if an identification resistor is mounted * between a third battery terminal and ground. This scheme is used by a lot @@ -512,7 +539,9 @@ struct power_supply_maintenance_charge_table { * use these for consistency. * * Its field names must correspond to elements in enum power_supply_property. - * The default field value is -EINVAL. + * The default field value is -EINVAL or NULL for pointers. + * + * CC/CV CHARGING: * * The charging parameters here assume a CC/CV charging scheme. This method * is most common with Lithium Ion batteries (other methods are possible) and @@ -597,6 +626,66 @@ * Overcharging Lithium Ion cells can be DANGEROUS and lead to fire or * explosions. + * + * DETERMINING BATTERY CAPACITY: + * + * Several members of the struct deal with trying to determine the remaining + * capacity in the battery, usually as a percentage of charge. In practice + * many chargers use a so-called fuel gauge or coulomb counter that measures + * how much charge goes into the battery and how much goes out (+/- leak + * consumption). This does not help if we do not know how much capacity the + * battery has to begin with, such as when it is first used or was taken out + * and charged in a separate charger. Therefore many capacity algorithms use + * the open circuit voltage with a look-up table to determine the rough + * capacity of the battery. The open circuit voltage can be conceptualized + * with an ideal voltage source (V) in series with an internal resistance (Ri) + * like this: + * + * +-------> IBAT >----------------+ + * | ^ | + * [ ] Ri | | + * | | VBAT | + * o <---------- | | + * +| ^ | [ ] Rload + * .---. | | | + * | V | | OCV | | + * '---' | | | + * | | | | + * GND +-------------------------------+ + * + * If we disconnect the load (here simplified as a fixed resistance Rload) + * and measure VBAT with an infinite impedance voltage meter we will get + * VBAT = OCV and this assumption is sometimes made even under load, assuming + * Rload is insignificant. However this will be of dubious quality because the + * load is rarely that small and Ri is strongly nonlinear depending on + * temperature and how much capacity is left in the battery due to the + * chemistry involved. + * + * In many practical applications we cannot just disconnect the battery from + * the load, so instead we often try to measure the instantaneous IBAT (the + * current out from the battery), estimate the Ri and thus calculate the + * voltage drop over Ri and compensate like this: + * + * OCV = VBAT - (IBAT * Ri) + * + * The tables vbat2ri_discharging and vbat2ri_charging are used to determine + * (by interpolation) the Ri from the VBAT under load. These curves are highly + * nonlinear and may need many datapoints but can be found in datasheets for + * some batteries. This gives the compensated open circuit voltage (OCV) for + * the battery even under load.
Using this method will also compensate for + * temperature changes in the environment: this will also make the internal + * resistance change, and it will affect the VBAT under load, so correlating + * VBAT to Ri takes both remaining capacity and temperature into consideration. + * + * Alternatively a manufacturer can specify how the capacity of the battery + * is dependent on the battery temperature, which is the main factor affecting + * Ri. As we know, all chemical reactions are faster when it is warm and slower + * when it is cold. You can put in 1500mAh and only get 800mAh out before the + * voltage drops too low, for example. This effect is also highly nonlinear, and + * this is the purpose of the resist_table: it will take a temperature and + * tell us how big a percentage of Ri the specified temperature corresponds to. + * Usually we have 100% of the factory_internal_resistance_uohm at 25 degrees + * Celsius. + * * The power supply class itself doesn't use this struct as of now. */ @@ -621,6 +710,7 @@ struct power_supply_battery_info { int alert_high_temp_charge_voltage_uv; int factory_internal_resistance_uohm; + int factory_internal_resistance_charging_uohm; int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX]; int temp_ambient_alert_min; int temp_ambient_alert_max; @@ -632,6 +722,10 @@ struct power_supply_battery_info { int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; struct power_supply_resistance_temp_table *resist_table; int resist_table_size; + struct power_supply_vbat_ri_table *vbat2ri_discharging; + int vbat2ri_discharging_size; + struct power_supply_vbat_ri_table *vbat2ri_charging; + int vbat2ri_charging_size; int bti_resistance_ohm; int bti_resistance_tolerance; }; @@ -675,6 +769,8 @@ extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, extern int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table, int table_len, int temp); +extern int power_supply_vbat2ri(struct power_supply_battery_info *info, + int vbat_uv, bool charging); extern struct power_supply_maintenance_charge_table * power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index); extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info, @@ -696,6 +792,19 @@ power_supply_supports_maintenance_charging(struct power_supply_battery_info *inf return (mt != NULL); } +static inline bool +power_supply_supports_vbat2ri(struct power_supply_battery_info *info) +{ + return ((info->vbat2ri_discharging != NULL) && + info->vbat2ri_discharging_size > 0); +} + +static inline bool +power_supply_supports_temp2ri(struct power_supply_battery_info *info) +{ + return ((info->resist_table != NULL) && + info->resist_table_size > 0); +} #ifdef CONFIG_POWER_SUPPLY extern int power_supply_is_system_supplied(void); -- cgit v1.2.3-71-gd317 From 342479c86d3e8f9e946a07ff0cafbd36511ae30a Mon Sep 17 00:00:00 2001 From: Chun-Jie Chen Date: Sun, 30 Jan 2022 09:21:04 +0800 Subject: soc: mediatek: pm-domains: Add support for mt8195 Add domain control data, including a bus protection data size change due to the additional protection steps on mt8195.
Signed-off-by: Chun-Jie Chen Reviewed-by: Chen-Yu Tsai Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220130012104.5292-6-chun-jie.chen@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mt8195-pm-domains.h | 613 +++++++++++++++++++++++++++++++ drivers/soc/mediatek/mtk-pm-domains.c | 5 + drivers/soc/mediatek/mtk-pm-domains.h | 2 +- include/linux/soc/mediatek/infracfg.h | 82 +++++ 4 files changed, 701 insertions(+), 1 deletion(-) create mode 100644 drivers/soc/mediatek/mt8195-pm-domains.h (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mt8195-pm-domains.h b/drivers/soc/mediatek/mt8195-pm-domains.h new file mode 100644 index 000000000000..938f4d51f5ae --- /dev/null +++ b/drivers/soc/mediatek/mt8195-pm-domains.h @@ -0,0 +1,613 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 MediaTek Inc. + * Author: Chun-Jie Chen + */ + +#ifndef __SOC_MEDIATEK_MT8195_PM_DOMAINS_H +#define __SOC_MEDIATEK_MT8195_PM_DOMAINS_H + +#include "mtk-pm-domains.h" +#include + +/* + * MT8195 power domain support + */ + +static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = { + [MT8195_POWER_DOMAIN_PCIE_MAC_P0] = { + .name = "pcie_mac_p0", + .sta_mask = BIT(11), + .ctl_offs = 0x328, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P0, + MT8195_TOP_AXI_PROT_EN_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P0, + MT8195_TOP_AXI_PROT_EN_VDNR_1_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1), + }, + }, + [MT8195_POWER_DOMAIN_PCIE_MAC_P1] = { + .name = "pcie_mac_p1", + .sta_mask = BIT(12), + .ctl_offs = 0x32C, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P1, + MT8195_TOP_AXI_PROT_EN_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P1, + MT8195_TOP_AXI_PROT_EN_VDNR_1_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1), + }, + }, + [MT8195_POWER_DOMAIN_PCIE_PHY] = { + .name = "pcie_phy", + .sta_mask = BIT(13), + .ctl_offs = 0x330, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_SSUSB_PCIE_PHY] = { + .name = "ssusb_pcie_phy", + .sta_mask = BIT(14), + .ctl_offs = 0x334, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_CSI_RX_TOP] = { + .name = "csi_rx_top", + .sta_mask = BIT(18), + .ctl_offs = 0x3C4, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_ETHER] = { + .name = "ether", + .sta_mask = BIT(3), + .ctl_offs = 0x344, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_ADSP] = { + .name = "adsp", + .sta_mask = BIT(10), + .ctl_offs = 0x360, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_ADSP, + MT8195_TOP_AXI_PROT_EN_2_SET, + 
MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + }, + .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_AUDIO] = { + .name = "audio", + .sta_mask = BIT(8), + .ctl_offs = 0x358, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_AUDIO, + MT8195_TOP_AXI_PROT_EN_2_SET, + MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + }, + }, + [MT8195_POWER_DOMAIN_MFG0] = { + .name = "mfg0", + .sta_mask = BIT(1), + .ctl_offs = 0x300, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, + }, + [MT8195_POWER_DOMAIN_MFG1] = { + .name = "mfg1", + .sta_mask = BIT(2), + .ctl_offs = 0x304, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MFG1, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_MFG1, + MT8195_TOP_AXI_PROT_EN_2_SET, + MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_1_MFG1, + MT8195_TOP_AXI_PROT_EN_1_SET, + MT8195_TOP_AXI_PROT_EN_1_CLR, + MT8195_TOP_AXI_PROT_EN_1_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_MFG1_2ND, + MT8195_TOP_AXI_PROT_EN_2_SET, + MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MFG1_2ND, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG2] = { + .name = "mfg2", + .sta_mask = BIT(3), + .ctl_offs = 0x308, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG3] = { + .name = "mfg3", + .sta_mask = BIT(4), + .ctl_offs = 0x30C, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG4] = { + .name = "mfg4", + .sta_mask = BIT(5), + .ctl_offs = 0x310, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG5] = { + .name = "mfg5", + .sta_mask = BIT(6), + .ctl_offs = 0x314, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_MFG6] = { + .name = "mfg6", + .sta_mask = BIT(7), + .ctl_offs = 0x318, + .pwr_sta_offs = 0x174, + .pwr_sta2nd_offs = 0x178, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VPPSYS0] = { + .name = "vppsys0", + .sta_mask = BIT(11), + .ctl_offs = 0x364, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + 
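+ /* Bus protection is asserted via these infracfg SET registers before the domain is powered down and released via the CLR registers after power-up; the STA1 registers are polled to acknowledge each step. */ +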
.bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VPPSYS0, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VPPSYS0_2ND, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0_2ND, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1), + }, + }, + [MT8195_POWER_DOMAIN_VDOSYS0] = { + .name = "vdosys0", + .sta_mask = BIT(13), + .ctl_offs = 0x36C, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDOSYS0, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDOSYS0, + MT8195_TOP_AXI_PROT_EN_SET, + MT8195_TOP_AXI_PROT_EN_CLR, + MT8195_TOP_AXI_PROT_EN_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, + MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1), + }, + }, + [MT8195_POWER_DOMAIN_VPPSYS1] = { + .name = "vppsys1", + .sta_mask = BIT(12), + .ctl_offs = 0x368, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS1, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + }, + [MT8195_POWER_DOMAIN_VDOSYS1] = { + .name = "vdosys1", + .sta_mask = BIT(14), + .ctl_offs = 0x370, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDOSYS1, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + }, + [MT8195_POWER_DOMAIN_DP_TX] = { + .name = "dp_tx", + .sta_mask = BIT(16), + .ctl_offs = 0x378, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_DP_TX, + MT8195_TOP_AXI_PROT_EN_VDNR_1_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_EPD_TX] = { + .name = "epd_tx", + .sta_mask = BIT(17), + .ctl_offs = 
0x37C, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_VDNR_1_EPD_TX, + MT8195_TOP_AXI_PROT_EN_VDNR_1_SET, + MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR, + MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_HDMI_TX] = { + .name = "hdmi_tx", + .sta_mask = BIT(18), + .ctl_offs = 0x380, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8195_POWER_DOMAIN_WPESYS] = { + .name = "wpesys", + .sta_mask = BIT(15), + .ctl_offs = 0x374, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_WPESYS, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS_2ND, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + }, + [MT8195_POWER_DOMAIN_VDEC0] = { + .name = "vdec0", + .sta_mask = BIT(20), + .ctl_offs = 0x388, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC0, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC0_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0_2ND, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VDEC1] = { + .name = "vdec1", + .sta_mask = BIT(21), + .ctl_offs = 0x38C, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC1, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VDEC1_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VDEC2] = { + .name = "vdec2", + .sta_mask = BIT(22), + .ctl_offs = 0x390, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2_2ND, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = 
BIT(23), + .ctl_offs = 0x394, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VENC, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VENC_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VENC, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_VENC_CORE1] = { + .name = "venc_core1", + .sta_mask = BIT(24), + .ctl_offs = 0x398, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_VENC_CORE1, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_VENC_CORE1, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_IMG] = { + .name = "img", + .sta_mask = BIT(29), + .ctl_offs = 0x3AC, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_IMG, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_IMG_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_DIP] = { + .name = "dip", + .sta_mask = BIT(30), + .ctl_offs = 0x3B0, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_IPE] = { + .name = "ipe", + .sta_mask = BIT(31), + .ctl_offs = 0x3B4, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_IPE, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_IPE, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_CAM] = { + .name = "cam", + .sta_mask = BIT(25), + .ctl_offs = 0x39C, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_2_CAM, + MT8195_TOP_AXI_PROT_EN_2_SET, + MT8195_TOP_AXI_PROT_EN_2_CLR, + MT8195_TOP_AXI_PROT_EN_2_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_CAM, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_1_CAM, + MT8195_TOP_AXI_PROT_EN_1_SET, + MT8195_TOP_AXI_PROT_EN_1_CLR, + MT8195_TOP_AXI_PROT_EN_1_STA1), + BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_CAM_2ND, + MT8195_TOP_AXI_PROT_EN_MM_SET, + MT8195_TOP_AXI_PROT_EN_MM_CLR, + MT8195_TOP_AXI_PROT_EN_MM_STA1), + 
BUS_PROT_WR(MT8195_TOP_AXI_PROT_EN_MM_2_CAM, + MT8195_TOP_AXI_PROT_EN_MM_2_SET, + MT8195_TOP_AXI_PROT_EN_MM_2_CLR, + MT8195_TOP_AXI_PROT_EN_MM_2_STA1), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_CAM_RAWA] = { + .name = "cam_rawa", + .sta_mask = BIT(26), + .ctl_offs = 0x3A0, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_CAM_RAWB] = { + .name = "cam_rawb", + .sta_mask = BIT(27), + .ctl_offs = 0x3A4, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8195_POWER_DOMAIN_CAM_MRAW] = { + .name = "cam_mraw", + .sta_mask = BIT(28), + .ctl_offs = 0x3A8, + .pwr_sta_offs = 0x16c, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = GENMASK(8, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, +}; + +static const struct scpsys_soc_data mt8195_scpsys_data = { + .domains_data = scpsys_domain_data_mt8195, + .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8195), +}; + +#endif /* __SOC_MEDIATEK_MT8195_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index ad06b6f90435..61973a306e97 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -20,6 +20,7 @@ #include "mt8173-pm-domains.h" #include "mt8183-pm-domains.h" #include "mt8192-pm-domains.h" +#include "mt8195-pm-domains.h" #define MTK_POLL_DELAY_US 10 #define MTK_POLL_TIMEOUT USEC_PER_SEC @@ -569,6 +570,10 @@ static const struct of_device_id scpsys_of_match[] = { .compatible = "mediatek,mt8192-power-controller", .data = &mt8192_scpsys_data, }, + { + .compatible = "mediatek,mt8195-power-controller", + .data = &mt8195_scpsys_data, + }, { } }; diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h index c233ed828f86..daa24e890dd4 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.h +++ b/drivers/soc/mediatek/mtk-pm-domains.h @@ -37,7 +37,7 @@ #define PWR_STATUS_AUDIO BIT(24) #define PWR_STATUS_USB BIT(25) -#define SPM_MAX_BUS_PROT_DATA 5 +#define SPM_MAX_BUS_PROT_DATA 6 #define _BUS_PROT(_mask, _set, _clr, _sta, _update, _ignore) { \ .bus_prot_mask = (_mask), \ diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 4615a228da51..d858e0bab7a2 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -2,6 +2,88 @@ #ifndef __SOC_MEDIATEK_INFRACFG_H #define __SOC_MEDIATEK_INFRACFG_H +#define MT8195_TOP_AXI_PROT_EN_STA1 0x228 +#define MT8195_TOP_AXI_PROT_EN_1_STA1 0x258 +#define MT8195_TOP_AXI_PROT_EN_SET 0x2a0 +#define MT8195_TOP_AXI_PROT_EN_CLR 0x2a4 +#define MT8195_TOP_AXI_PROT_EN_1_SET 0x2a8 +#define MT8195_TOP_AXI_PROT_EN_1_CLR 0x2ac +#define MT8195_TOP_AXI_PROT_EN_MM_SET 0x2d4 +#define MT8195_TOP_AXI_PROT_EN_MM_CLR 0x2d8 +#define MT8195_TOP_AXI_PROT_EN_MM_STA1 0x2ec +#define MT8195_TOP_AXI_PROT_EN_2_SET 0x714 +#define MT8195_TOP_AXI_PROT_EN_2_CLR 0x718 +#define MT8195_TOP_AXI_PROT_EN_2_STA1 0x724 +#define MT8195_TOP_AXI_PROT_EN_VDNR_SET 0xb84 +#define MT8195_TOP_AXI_PROT_EN_VDNR_CLR 0xb88 +#define MT8195_TOP_AXI_PROT_EN_VDNR_STA1 0xb90 +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_SET 0xba4 +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR 0xba8 +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1 0xbb0 +#define MT8195_TOP_AXI_PROT_EN_VDNR_2_SET 0xbb8 
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_CLR 0xbbc +#define MT8195_TOP_AXI_PROT_EN_VDNR_2_STA1 0xbc4 +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xbcc +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xbd0 +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1 0xbd8 +#define MT8195_TOP_AXI_PROT_EN_MM_2_SET 0xdcc +#define MT8195_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0 +#define MT8195_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8 + +#define MT8195_TOP_AXI_PROT_EN_VDOSYS0 BIT(6) +#define MT8195_TOP_AXI_PROT_EN_VPPSYS0 BIT(10) +#define MT8195_TOP_AXI_PROT_EN_MFG1 BIT(11) +#define MT8195_TOP_AXI_PROT_EN_MFG1_2ND GENMASK(22, 21) +#define MT8195_TOP_AXI_PROT_EN_VPPSYS0_2ND BIT(23) +#define MT8195_TOP_AXI_PROT_EN_1_MFG1 GENMASK(20, 19) +#define MT8195_TOP_AXI_PROT_EN_1_CAM BIT(22) +#define MT8195_TOP_AXI_PROT_EN_2_CAM BIT(0) +#define MT8195_TOP_AXI_PROT_EN_2_MFG1_2ND GENMASK(6, 5) +#define MT8195_TOP_AXI_PROT_EN_2_MFG1 BIT(7) +#define MT8195_TOP_AXI_PROT_EN_2_AUDIO (BIT(9) | BIT(11)) +#define MT8195_TOP_AXI_PROT_EN_2_ADSP (BIT(12) | GENMASK(16, 14)) +#define MT8195_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2) | BIT(4)) +#define MT8195_TOP_AXI_PROT_EN_MM_IPE BIT(1) +#define MT8195_TOP_AXI_PROT_EN_MM_IMG BIT(3) +#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS0 GENMASK(21, 17) +#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1 GENMASK(8, 5) +#define MT8195_TOP_AXI_PROT_EN_MM_VENC (BIT(9) | BIT(11)) +#define MT8195_TOP_AXI_PROT_EN_MM_VENC_CORE1 (BIT(10) | BIT(12)) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0 BIT(13) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1 BIT(14) +#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1_2ND BIT(22) +#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1_2ND BIT(23) +#define MT8195_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(24) +#define MT8195_TOP_AXI_PROT_EN_MM_IMG_2ND BIT(25) +#define MT8195_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(26) +#define MT8195_TOP_AXI_PROT_EN_MM_WPESYS BIT(27) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0_2ND BIT(28) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1_2ND BIT(29) +#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1 GENMASK(31, 30) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0_2ND (GENMASK(1, 0) | BIT(4) | BIT(11)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC BIT(2) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC_CORE1 (BIT(3) | BIT(15)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_CAM (BIT(5) | BIT(17)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS1 (GENMASK(7, 6) | BIT(18)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0 GENMASK(9, 8) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDOSYS1 BIT(10) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2_2ND BIT(12) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0_2ND BIT(13) +#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS_2ND BIT(14) +#define MT8195_TOP_AXI_PROT_EN_MM_2_IPE BIT(16) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2 BIT(21) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0 BIT(22) +#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS GENMASK(24, 23) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_EPD_TX BIT(1) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_DP_TX BIT(2) +#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P0 (BIT(11) | BIT(28)) +#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P1 (BIT(12) | BIT(29)) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P0 BIT(13) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P1 BIT(14) +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1 (BIT(17) | BIT(19)) +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0 BIT(20) +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0 BIT(21) + #define MT8192_TOP_AXI_PROT_EN_STA1 0x228 #define MT8192_TOP_AXI_PROT_EN_1_STA1 0x258 #define MT8192_TOP_AXI_PROT_EN_SET 0x2a0 -- cgit v1.2.3-71-gd317 From 
88590cbc17033c86c8591d9f22401325961a8a59 Mon Sep 17 00:00:00 2001 From: Chun-Jie Chen Date: Tue, 15 Feb 2022 18:49:17 +0800 Subject: soc: mediatek: pm-domains: Add support for mt8186 Add power domain control data in mt8186. Signed-off-by: Chun-Jie Chen Link: https://lore.kernel.org/r/20220215104917.5726-3-chun-jie.chen@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mt8186-pm-domains.h | 344 +++++++++++++++++++++++++++++++ drivers/soc/mediatek/mtk-pm-domains.c | 5 + include/linux/soc/mediatek/infracfg.h | 48 +++++ 3 files changed, 397 insertions(+) create mode 100644 drivers/soc/mediatek/mt8186-pm-domains.h (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h new file mode 100644 index 000000000000..bf2dd0cdc3a8 --- /dev/null +++ b/drivers/soc/mediatek/mt8186-pm-domains.h @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 MediaTek Inc. + * Author: Chun-Jie Chen + */ + +#ifndef __SOC_MEDIATEK_MT8186_PM_DOMAINS_H +#define __SOC_MEDIATEK_MT8186_PM_DOMAINS_H + +#include "mtk-pm-domains.h" +#include + +/* + * MT8186 power domain support + */ + +static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { + [MT8186_POWER_DOMAIN_MFG0] = { + .name = "mfg0", + .sta_mask = BIT(2), + .ctl_offs = 0x308, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, + }, + [MT8186_POWER_DOMAIN_MFG1] = { + .name = "mfg1", + .sta_mask = BIT(3), + .ctl_offs = 0x30c, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_MFG1_STEP2, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_MFG1_STEP3, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP4, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_MFG2] = { + .name = "mfg2", + .sta_mask = BIT(4), + .ctl_offs = 0x310, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_MFG3] = { + .name = "mfg3", + .sta_mask = BIT(5), + .ctl_offs = 0x314, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_SSUSB] = { + .name = "ssusb", + .sta_mask = BIT(20), + .ctl_offs = 0x9F0, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8186_POWER_DOMAIN_SSUSB_P1] = { + .name = "ssusb_p1", + .sta_mask = BIT(19), + .ctl_offs = 0x9F4, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8186_POWER_DOMAIN_DIS] = { + .name = "dis", + .sta_mask = BIT(21), + .ctl_offs = 0x354, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + 
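/* SRAM power-down is requested via sram_pdn_bits and acknowledged via sram_pdn_ack_bits, which the power sequence polls. */ +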
.sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_DIS_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_DIS_STEP2, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + }, + }, + [MT8186_POWER_DOMAIN_IMG] = { + .name = "img", + .sta_mask = BIT(13), + .ctl_offs = 0x334, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IMG_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IMG_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_IMG2] = { + .name = "img2", + .sta_mask = BIT(14), + .ctl_offs = 0x338, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_IPE] = { + .name = "ipe", + .sta_mask = BIT(15), + .ctl_offs = 0x33C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IPE_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_IPE_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_CAM] = { + .name = "cam", + .sta_mask = BIT(23), + .ctl_offs = 0x35C, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_CAM_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_CAM_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_CAM_RAWA] = { + .name = "cam_rawa", + .sta_mask = BIT(24), + .ctl_offs = 0x360, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_CAM_RAWB] = { + .name = "cam_rawb", + .sta_mask = BIT(25), + .ctl_offs = 0x364, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = BIT(18), + .ctl_offs = 0x348, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VENC_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VENC_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_VDEC] = { + .name = "vdec", + .sta_mask = BIT(16), + .ctl_offs = 0x340, + .pwr_sta_offs = 
0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP2, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_WPE] = { + .name = "wpe", + .sta_mask = BIT(0), + .ctl_offs = 0x3F8, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_2_WPE_STEP1, + MT8186_TOP_AXI_PROT_EN_2_SET, + MT8186_TOP_AXI_PROT_EN_2_CLR, + MT8186_TOP_AXI_PROT_EN_2_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_2_WPE_STEP2, + MT8186_TOP_AXI_PROT_EN_2_SET, + MT8186_TOP_AXI_PROT_EN_2_CLR, + MT8186_TOP_AXI_PROT_EN_2_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_CONN_ON] = { + .name = "conn_on", + .sta_mask = BIT(1), + .ctl_offs = 0x304, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_1_CONN_ON_STEP1, + MT8186_TOP_AXI_PROT_EN_1_SET, + MT8186_TOP_AXI_PROT_EN_1_CLR, + MT8186_TOP_AXI_PROT_EN_1_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP2, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP3, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP4, + MT8186_TOP_AXI_PROT_EN_SET, + MT8186_TOP_AXI_PROT_EN_CLR, + MT8186_TOP_AXI_PROT_EN_STA), + }, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + }, + [MT8186_POWER_DOMAIN_CSIRX_TOP] = { + .name = "csirx_top", + .sta_mask = BIT(6), + .ctl_offs = 0x318, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_ADSP_AO] = { + .name = "adsp_ao", + .sta_mask = BIT(17), + .ctl_offs = 0x9FC, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_ADSP_INFRA] = { + .name = "adsp_infra", + .sta_mask = BIT(10), + .ctl_offs = 0x9F8, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + }, + [MT8186_POWER_DOMAIN_ADSP_TOP] = { + .name = "adsp_top", + .sta_mask = BIT(31), + .ctl_offs = 0x3E4, + .pwr_sta_offs = 0x16C, + .pwr_sta2nd_offs = 0x170, + .sram_pdn_bits = BIT(8), + .sram_pdn_ack_bits = BIT(12), + .bp_infracfg = { + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP1, + MT8186_TOP_AXI_PROT_EN_3_SET, + MT8186_TOP_AXI_PROT_EN_3_CLR, + MT8186_TOP_AXI_PROT_EN_3_STA), + BUS_PROT_WR_IGN(MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP2, + MT8186_TOP_AXI_PROT_EN_3_SET, + MT8186_TOP_AXI_PROT_EN_3_CLR, + MT8186_TOP_AXI_PROT_EN_3_STA), + }, + .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + }, +}; + +static const struct scpsys_soc_data mt8186_scpsys_data = { + .domains_data = scpsys_domain_data_mt8186, + .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8186), +}; + +#endif /* __SOC_MEDIATEK_MT8186_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index 61973a306e97..5ced254b082b 100644 --- 
a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -19,6 +19,7 @@ #include "mt8167-pm-domains.h" #include "mt8173-pm-domains.h" #include "mt8183-pm-domains.h" +#include "mt8186-pm-domains.h" #include "mt8192-pm-domains.h" #include "mt8195-pm-domains.h" @@ -566,6 +567,10 @@ static const struct of_device_id scpsys_of_match[] = { .compatible = "mediatek,mt8183-power-controller", .data = &mt8183_scpsys_data, }, + { + .compatible = "mediatek,mt8186-power-controller", + .data = &mt8186_scpsys_data, + }, { .compatible = "mediatek,mt8192-power-controller", .data = &mt8192_scpsys_data, diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index d858e0bab7a2..8a1c2040a28e 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -140,6 +140,54 @@ #define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13) #define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21) +#define MT8186_TOP_AXI_PROT_EN_SET (0x2A0) +#define MT8186_TOP_AXI_PROT_EN_CLR (0x2A4) +#define MT8186_TOP_AXI_PROT_EN_STA (0x228) +#define MT8186_TOP_AXI_PROT_EN_1_SET (0x2A8) +#define MT8186_TOP_AXI_PROT_EN_1_CLR (0x2AC) +#define MT8186_TOP_AXI_PROT_EN_1_STA (0x258) +#define MT8186_TOP_AXI_PROT_EN_2_SET (0x2B0) +#define MT8186_TOP_AXI_PROT_EN_2_CLR (0x2B4) +#define MT8186_TOP_AXI_PROT_EN_2_STA (0x26C) +#define MT8186_TOP_AXI_PROT_EN_3_SET (0x2B8) +#define MT8186_TOP_AXI_PROT_EN_3_CLR (0x2BC) +#define MT8186_TOP_AXI_PROT_EN_3_STA (0x2C8) + +/* MFG1 */ +#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP1 (GENMASK(28, 27)) +#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP2 (GENMASK(22, 21)) +#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP3 (BIT(25)) +#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP4 (BIT(29)) +/* DIS */ +#define MT8186_TOP_AXI_PROT_EN_1_DIS_STEP1 (GENMASK(12, 11)) +#define MT8186_TOP_AXI_PROT_EN_DIS_STEP2 (GENMASK(2, 1) | GENMASK(11, 10)) +/* IMG */ +#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP1 (BIT(23)) +#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP2 (BIT(15)) +/* IPE */ +#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP1 (BIT(24)) +#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP2 (BIT(16)) +/* CAM */ +#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP1 (GENMASK(22, 21)) +#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP2 (GENMASK(14, 13)) +/* VENC */ +#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP1 (BIT(31)) +#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP2 (BIT(19)) +/* VDEC */ +#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP1 (BIT(30)) +#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP2 (BIT(17)) +/* WPE */ +#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP1 (BIT(17)) +#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP2 (BIT(16)) +/* CONN_ON */ +#define MT8186_TOP_AXI_PROT_EN_1_CONN_ON_STEP1 (BIT(18)) +#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP2 (BIT(14)) +#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP3 (BIT(13)) +#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP4 (BIT(16)) +/* ADSP_TOP */ +#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP1 (GENMASK(12, 11)) +#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP2 (GENMASK(1, 0)) + #define MT8183_TOP_AXI_PROT_EN_STA1 0x228 #define MT8183_TOP_AXI_PROT_EN_STA1_1 0x258 #define MT8183_TOP_AXI_PROT_EN_SET 0x2a0 -- cgit v1.2.3-71-gd317 From e65b831a1e191caff3fc0d06bc7019cdaf8f868e Mon Sep 17 00:00:00 2001 From: Qinghua Jin Date: Fri, 7 Jan 2022 10:22:58 +0800 Subject: nvme-fc: fix a typo subsytem -> subsystem Signed-off-by: Qinghua Jin Signed-off-by: Christoph Hellwig --- include/linux/nvme-fc-driver.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git 
a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index cb909edb76c4..5358a5facdee 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -721,7 +721,7 @@ enum { * * Fields with static values for the port. Initialized by the * port_info struct supplied to the registration call. - * @port_num: NVME-FC transport subsytem port number + * @port_num: NVME-FC transport subsystem port number * @node_name: FC WWNN for the port * @port_name: FC WWPN for the port * @private: pointer to memory allocated alongside the local port -- cgit v1.2.3-71-gd317 From bd83fe6f2cd2133beaac7c423fd36c3515048fc8 Mon Sep 17 00:00:00 2001 From: Alan Adamson Date: Thu, 3 Feb 2022 00:11:53 -0800 Subject: nvme: add verbose error logging Improves logging of NVMe errors. If NVME_VERBOSE_ERRORS is configured, a verbose description of the error is logged, otherwise only status codes/bits are logged. Signed-off-by: Chaitanya Kulkarni [kch]: fix several nits, cosmetics, and trim down code. Signed-off-by: Martin K. Petersen Signed-off-by: Alan Adamson Reviewed-by: Himanshu Madhani Reviewed-by: Keith Busch Signed-off-by: Christoph Hellwig --- drivers/nvme/host/Kconfig | 8 ++ drivers/nvme/host/Makefile | 2 +- drivers/nvme/host/constants.c | 185 ++++++++++++++++++++++++++++++++++++++++++ drivers/nvme/host/core.c | 33 ++++++++ drivers/nvme/host/nvme.h | 19 +++++ include/linux/nvme.h | 1 + 6 files changed, 247 insertions(+), 1 deletion(-) create mode 100644 drivers/nvme/host/constants.c (limited to 'include/linux') diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index dc0450ca23a3..d6d056963c06 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -24,6 +24,14 @@ config NVME_MULTIPATH /dev/nvmeXnY device will show up for each NVMe namespace, even if it is accessible through multiple controllers. +config NVME_VERBOSE_ERRORS + bool "NVMe verbose error reporting" + depends on NVME_CORE + help + This option enables verbose reporting for NVMe errors. The + error translation table will grow the kernel image size by + about 4 KB.
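+
+# Purely hypothetical illustration (device name and values invented): with
+# this option enabled, an I/O error is logged by nvme_log_error() below as:
+#   nvme0n1: Read(0x2) @ LBA 2048, 8 blocks, Unrecovered Read Error (sct 0x2 / sc 0x81) DNR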
+ config NVME_HWMON bool "NVMe hardware monitoring" depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON) diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index dfaacd472e5d..476c5c988496 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o obj-$(CONFIG_NVME_FC) += nvme-fc.o obj-$(CONFIG_NVME_TCP) += nvme-tcp.o -nvme-core-y := core.o ioctl.o +nvme-core-y := core.o ioctl.o constants.o nvme-core-$(CONFIG_TRACING) += trace.o nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c new file mode 100644 index 000000000000..7d49eb34b348 --- /dev/null +++ b/drivers/nvme/host/constants.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVM Express device driver verbose errors + * Copyright (c) 2022, Oracle and/or its affiliates + */ + +#include +#include "nvme.h" + +#ifdef CONFIG_NVME_VERBOSE_ERRORS +static const char * const nvme_ops[] = { + [nvme_cmd_flush] = "Flush", + [nvme_cmd_write] = "Write", + [nvme_cmd_read] = "Read", + [nvme_cmd_write_uncor] = "Write Uncorrectable", + [nvme_cmd_compare] = "Compare", + [nvme_cmd_write_zeroes] = "Write Zeros", + [nvme_cmd_dsm] = "Dataset Management", + [nvme_cmd_verify] = "Verify", + [nvme_cmd_resv_register] = "Reservation Register", + [nvme_cmd_resv_report] = "Reservation Report", + [nvme_cmd_resv_acquire] = "Reservation Acquire", + [nvme_cmd_resv_release] = "Reservation Release", + [nvme_cmd_zone_mgmt_send] = "Zone Management Send", + [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive", + [nvme_cmd_zone_append] = "Zone Management Append", +}; + +static const char * const nvme_admin_ops[] = { + [nvme_admin_delete_sq] = "Delete SQ", + [nvme_admin_create_sq] = "Create SQ", + [nvme_admin_get_log_page] = "Get Log Page", + [nvme_admin_delete_cq] = "Delete CQ", + [nvme_admin_create_cq] = "Create CQ", + [nvme_admin_identify] = "Identify", + [nvme_admin_abort_cmd] = "Abort Command", + [nvme_admin_set_features] = "Set Features", + [nvme_admin_get_features] = "Get Features", + [nvme_admin_async_event] = "Async Event", + [nvme_admin_ns_mgmt] = "Namespace Management", + [nvme_admin_activate_fw] = "Activate Firmware", + [nvme_admin_download_fw] = "Download Firmware", + [nvme_admin_dev_self_test] = "Device Self Test", + [nvme_admin_ns_attach] = "Namespace Attach", + [nvme_admin_keep_alive] = "Keep Alive", + [nvme_admin_directive_send] = "Directive Send", + [nvme_admin_directive_recv] = "Directive Receive", + [nvme_admin_virtual_mgmt] = "Virtual Management", + [nvme_admin_nvme_mi_send] = "NVMe Send MI", + [nvme_admin_nvme_mi_recv] = "NVMe Receive MI", + [nvme_admin_dbbuf] = "Doorbell Buffer Config", + [nvme_admin_format_nvm] = "Format NVM", + [nvme_admin_security_send] = "Security Send", + [nvme_admin_security_recv] = "Security Receive", + [nvme_admin_sanitize_nvm] = "Sanitize NVM", + [nvme_admin_get_lba_status] = "Get LBA Status", +}; + +static const char * const nvme_statuses[] = { + [NVME_SC_SUCCESS] = "Success", + [NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode", + [NVME_SC_INVALID_FIELD] = "Invalid Field in Command", + [NVME_SC_CMDID_CONFLICT] = "Command ID Conflict", + [NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error", + [NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification", + [NVME_SC_INTERNAL] = "Internal Error", + [NVME_SC_ABORT_REQ] = "Command Abort Requested", + [NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ 
Deletion", + [NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command", + [NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command", + [NVME_SC_INVALID_NS] = "Invalid Namespace or Format", + [NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error", + [NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor", + [NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors", + [NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid", + [NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid", + [NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid", + [NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer", + [NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset Invalid", + [NVME_SC_ATOMIC_WU_EXCEEDED] = "Atomic Write Unit Exceeded", + [NVME_SC_OP_DENIED] = "Operation Denied", + [NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid", + [NVME_SC_RESERVED] = "Reserved", + [NVME_SC_HOST_ID_INCONSIST] = "Host Identifier Inconsistent Format", + [NVME_SC_KA_TIMEOUT_EXPIRED] = "Keep Alive Timeout Expired", + [NVME_SC_KA_TIMEOUT_INVALID] = "Keep Alive Timeout Invalid", + [NVME_SC_ABORTED_PREEMPT_ABORT] = "Command Aborted due to Preempt and Abort", + [NVME_SC_SANITIZE_FAILED] = "Sanitize Failed", + [NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress", + [NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid", + [NVME_SC_CMD_NOT_SUP_CMB_QUEUE] = "Command Not Supported for Queue in CMB", + [NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected", + [NVME_SC_CMD_INTERRUPTED] = "Command Interrupted", + [NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error", + [NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set", + [NVME_SC_LBA_RANGE] = "LBA Out of Range", + [NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded", + [NVME_SC_NS_NOT_READY] = "Namespace Not Ready", + [NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict", + [NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress", + [NVME_SC_CQ_INVALID] = "Completion Queue Invalid", + [NVME_SC_QID_INVALID] = "Invalid Queue Identifier", + [NVME_SC_QUEUE_SIZE] = "Invalid Queue Size", + [NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded", + [NVME_SC_ABORT_MISSING] = "Reserved", /* XXX */ + [NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded", + [NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot", + [NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image", + [NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector", + [NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page", + [NVME_SC_INVALID_FORMAT] = "Invalid Format", + [NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset", + [NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion", + [NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable", + [NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable", + [NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific", + [NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset", + [NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Reset", + [NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation", + [NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited", + [NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range", + [NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity", + [NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable", + [NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached", + [NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private", + [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not 
Attached", + [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported", + [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid", + [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress", + [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited", + [NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier", + [NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State", + [NVME_SC_CTRL_RES_NUM_INVALID] = "Invalid Number of Controller Resources", + [NVME_SC_RES_ID_INVALID] = "Invalid Resource Identifier", + [NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited", + [NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid", + [NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed", + [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes", + [NVME_SC_INVALID_PI] = "Invalid Protection Information", + [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range", + [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported", + [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error", + [NVME_SC_ZONE_FULL] = "Zone Is Full", + [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only", + [NVME_SC_ZONE_OFFLINE] = "Zone Is Offline", + [NVME_SC_ZONE_INVALID_WRITE] = "Zone Invalid Write", + [NVME_SC_ZONE_TOO_MANY_ACTIVE] = "Too Many Active Zones", + [NVME_SC_ZONE_TOO_MANY_OPEN] = "Too Many Open Zones", + [NVME_SC_ZONE_INVALID_TRANSITION] = "Invalid Zone State Transition", + [NVME_SC_WRITE_FAULT] = "Write Fault", + [NVME_SC_READ_ERROR] = "Unrecovered Read Error", + [NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error", + [NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error", + [NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error", + [NVME_SC_COMPARE_FAILED] = "Compare Failure", + [NVME_SC_ACCESS_DENIED] = "Access Denied", + [NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block", + [NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss", + [NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible", + [NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition", + [NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error", +}; + +const unsigned char *nvme_get_error_status_str(u16 status) +{ + status &= 0x7ff; + if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status]) + return nvme_statuses[status & 0x7ff]; + return "Unknown"; +} + +const unsigned char *nvme_get_opcode_str(u8 opcode) +{ + if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode]) + return nvme_ops[opcode]; + return "Unknown"; +} + +const unsigned char *nvme_get_admin_opcode_str(u8 opcode) +{ + if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode]) + return nvme_admin_ops[opcode]; + return "Unknown"; +} +#endif /* CONFIG_NVME_VERBOSE_ERRORS */ diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index da0e19148177..5e55732f3ea1 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -299,6 +299,37 @@ static void nvme_retry_req(struct request *req) blk_mq_delay_kick_requeue_list(req->q, delay); } +static void nvme_log_error(struct request *req) +{ + struct nvme_ns *ns = req->q->queuedata; + struct nvme_request *nr = nvme_req(req); + + if (ns) { + pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n", + ns->disk ? 
ns->disk->disk_name : "?", + nvme_get_opcode_str(nr->cmd->common.opcode), + nr->cmd->common.opcode, + (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)), + (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift, + nvme_get_error_status_str(nr->status), + nr->status >> 8 & 7, /* Status Code Type */ + nr->status & 0xff, /* Status Code */ + nr->status & NVME_SC_MORE ? "MORE " : "", + nr->status & NVME_SC_DNR ? "DNR " : ""); + return; + } + + pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n", + dev_name(nr->ctrl->device), + nvme_get_admin_opcode_str(nr->cmd->common.opcode), + nr->cmd->common.opcode, + nvme_get_error_status_str(nr->status), + nr->status >> 8 & 7, /* Status Code Type */ + nr->status & 0xff, /* Status Code */ + nr->status & NVME_SC_MORE ? "MORE " : "", + nr->status & NVME_SC_DNR ? "DNR " : ""); +} enum nvme_disposition { COMPLETE, RETRY, @@ -339,6 +370,8 @@ static inline void nvme_end_req(struct request *req) { blk_status_t status = nvme_error_status(nvme_req(req)->status); + if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS)) + nvme_log_error(req); nvme_end_req_zoned(req); nvme_trace_bio_complete(req); blk_mq_end_request(req, status); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index f8658f984d64..08893c04ca89 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -938,4 +938,23 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl) return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI; } +#ifdef CONFIG_NVME_VERBOSE_ERRORS +const unsigned char *nvme_get_error_status_str(u16 status); +const unsigned char *nvme_get_opcode_str(u8 opcode); +const unsigned char *nvme_get_admin_opcode_str(u8 opcode); +#else /* CONFIG_NVME_VERBOSE_ERRORS */ +static inline const unsigned char *nvme_get_error_status_str(u16 status) +{ + return "I/O Error"; +} +static inline const unsigned char *nvme_get_opcode_str(u8 opcode) +{ + return "I/O Cmd"; +} +static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode) +{ + return "Admin Cmd"; +} +#endif /* CONFIG_NVME_VERBOSE_ERRORS */ + #endif /* _NVME_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 855dd9b3e84b..1f946e5bf7c1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1636,6 +1636,7 @@ enum { NVME_SC_HOST_ABORTED_CMD = 0x371, NVME_SC_CRD = 0x1800, + NVME_SC_MORE = 0x2000, NVME_SC_DNR = 0x4000, }; -- cgit v1.2.3-71-gd317 From 86c2457a8e8112f16af8fd10a3e1dd7a302c3c3e Mon Sep 17 00:00:00 2001 From: Martin Belanger Date: Tue, 8 Feb 2022 14:33:46 -0500 Subject: nvme: expose cntrltype and dctype through sysfs TP8010 introduces the Discovery Controller Type attribute (dctype). The dctype is returned in the response to the Identify command. This patch exposes the dctype through sysfs. Since the dctype depends on the Controller Type (cntrltype), another attribute of the Identify response, the patch also exposes the cntrltype. The dctype will only be displayed for discovery controllers. A note about the naming of this attribute: although TP8010 calls this attribute the Discovery Controller Type, the dctype is now part of the response to the Identify command for all controller types. I/O, Discovery, and Admin controllers all share the same Identify response PDU structure. Non-discovery controllers as well as pre-TP8010 discovery controllers will continue to set this field to 0 (which has always been the default for reserved bytes).
Per TP8010, the value 0 now means "Discovery controller type is not reported" instead of "Reserved". One could argue that this definition is correct even for non-discovery controllers, and by extension, exposing it in the sysfs for non-discovery controllers is appropriate. Signed-off-by: Martin Belanger Reviewed-by: Chaitanya Kulkarni Reviewed-by: John Meneghini Reviewed-by: Hannes Reinecke Signed-off-by: Christoph Hellwig --- drivers/nvme/host/core.c | 39 +++++++++++++++++++++++++++++++++++++++ drivers/nvme/host/nvme.h | 3 +++ include/linux/nvme.h | 10 +++++++++- 3 files changed, 51 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index b3e70bb2e6dc..52a49ae5b673 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3023,6 +3023,9 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->max_namespaces = le32_to_cpu(id->mnan); ctrl->ctratt = le32_to_cpu(id->ctratt); + ctrl->cntrltype = id->cntrltype; + ctrl->dctype = id->dctype; + if (id->rtd3e) { /* us -> s */ u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; @@ -3556,6 +3559,40 @@ static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev, static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store); +static ssize_t cntrltype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + static const char * const type[] = { + [NVME_CTRL_IO] = "io\n", + [NVME_CTRL_DISC] = "discovery\n", + [NVME_CTRL_ADMIN] = "admin\n", + }; + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype]) + return sysfs_emit(buf, "reserved\n"); + + return sysfs_emit(buf, type[ctrl->cntrltype]); +} +static DEVICE_ATTR_RO(cntrltype); + +static ssize_t dctype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + static const char * const type[] = { + [NVME_DCTYPE_NOT_REPORTED] = "none\n", + [NVME_DCTYPE_DDC] = "ddc\n", + [NVME_DCTYPE_CDC] = "cdc\n", + }; + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype]) + return sysfs_emit(buf, "reserved\n"); + + return sysfs_emit(buf, type[ctrl->dctype]); +} +static DEVICE_ATTR_RO(dctype); + static struct attribute *nvme_dev_attrs[] = { &dev_attr_reset_controller.attr, &dev_attr_rescan_controller.attr, @@ -3577,6 +3614,8 @@ static struct attribute *nvme_dev_attrs[] = { &dev_attr_reconnect_delay.attr, &dev_attr_fast_io_fail_tmo.attr, &dev_attr_kato.attr, + &dev_attr_cntrltype.attr, + &dev_attr_dctype.attr, NULL }; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 08893c04ca89..4ce48203e63f 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -349,6 +349,9 @@ struct nvme_ctrl { unsigned long discard_page_busy; struct nvme_fault_inject fault_inject; + + enum nvme_ctrl_type cntrltype; + enum nvme_dctype dctype; }; enum nvme_iopolicy { diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 1f946e5bf7c1..9dbc3ef4daf7 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -43,6 +43,12 @@ enum nvme_ctrl_type { NVME_CTRL_ADMIN = 3, /* Administrative controller */ }; +enum nvme_dctype { + NVME_DCTYPE_NOT_REPORTED = 0, + NVME_DCTYPE_DDC = 1, /* Direct Discovery Controller */ + NVME_DCTYPE_CDC = 2, /* Central Discovery Controller */ +}; + /* Address Family codes for Discovery Log Page entry ADRFAM field */ enum { NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */ @@ -320,7 
+326,9 @@ struct nvme_id_ctrl { __le16 icdoff; __u8 ctrattr; __u8 msdbd; - __u8 rsvd1804[244]; + __u8 rsvd1804[2]; + __u8 dctype; + __u8 rsvd1807[241]; struct nvme_id_power_state psd[32]; __u8 vs[1024]; }; -- cgit v1.2.3-71-gd317 From a5081bad2eac5108593ac36b6551201f0df9e897 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Sat, 26 Feb 2022 14:56:22 +0000 Subject: net: phylink: remove phylink_set_pcs() As all users of phylink_set_pcs() have now been updated to use the mac_select_pcs() method, it can be removed from the phylink kernel API and its functionality moved into phylink_major_config(). Removing phylink_set_pcs() gives us a single approach for attaching a PCS within phylink. Signed-off-by: Russell King (Oracle) Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 44 ++++++++++++-------------------------------- include/linux/phylink.h | 1 - 2 files changed, 12 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 28aa23533107..8d1cd2b9ba5f 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -815,8 +815,18 @@ static void phylink_major_config(struct phylink *pl, bool restart, /* If we have a new PCS, switch to the new PCS after preparing the MAC * for the change. */ - if (pcs) - phylink_set_pcs(pl, pcs); + if (pcs) { + pl->pcs = pcs; + pl->pcs_ops = pcs->ops; + + if (!pl->phylink_disable_state && + pl->cfg_link_an_mode == MLO_AN_INBAND) { + if (pcs->poll) + mod_timer(&pl->link_poll, jiffies + HZ); + else + del_timer(&pl->link_poll); + } + } phylink_mac_config(pl, state); @@ -1286,36 +1296,6 @@ struct phylink *phylink_create(struct phylink_config *config, } EXPORT_SYMBOL_GPL(phylink_create); -/** - * phylink_set_pcs() - set the current PCS for phylink to use - * @pl: a pointer to a &struct phylink returned from phylink_create() - * @pcs: a pointer to the &struct phylink_pcs - * - * Bind the MAC PCS to phylink. This may be called after phylink_create(). - * If it is desired to dynamically change the PCS, then the preferred method - * is to use mac_select_pcs(), but it may also be called in mac_prepare() - * or mac_config(). - * - * Please note that there are behavioural changes with the mac_config() - * callback if a PCS is present (denoting a newer setup) so removing a PCS - * is not supported, and if a PCS is going to be used, it must be registered - * by calling phylink_set_pcs() at the latest in the first mac_config() call. 
- */ -void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs) -{ - pl->pcs = pcs; - pl->pcs_ops = pcs->ops; - - if (!pl->phylink_disable_state && - pl->cfg_link_an_mode == MLO_AN_INBAND) { - if (pcs->poll) - mod_timer(&pl->link_poll, jiffies + HZ); - else - del_timer(&pl->link_poll); - } -} -EXPORT_SYMBOL_GPL(phylink_set_pcs); - /** * phylink_destroy() - cleanup and destroy the phylink instance * @pl: a pointer to a &struct phylink returned from phylink_create() diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 9ef9b7047f19..223781622b33 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -532,7 +532,6 @@ void phylink_generic_validate(struct phylink_config *config, struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, phy_interface_t iface, const struct phylink_mac_ops *mac_ops); -void phylink_set_pcs(struct phylink *, struct phylink_pcs *pcs); void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); -- cgit v1.2.3-71-gd317 From 989192ac6ad54acccd5dd1c058055dacd6b94b30 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:41 +0800 Subject: iommu/vt-d: Remove guest pasid related callbacks The guest pasid related callbacks are not called in the tree. Remove them to avoid dead code. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-2-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 208 ------------------------------------------- drivers/iommu/intel/pasid.c | 161 ---------------------------------- drivers/iommu/intel/pasid.h | 4 - drivers/iommu/intel/svm.c | 209 -------------------------------------------- include/linux/intel-iommu.h | 10 --- include/linux/intel-svm.h | 12 --- 6 files changed, 604 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 92fea3fbbb11..4f9d95067c8f 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4825,13 +4825,6 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, if (!iommu) return -ENODEV; - if ((dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE) && - !ecap_nest(iommu->ecap)) { - dev_err(dev, "%s: iommu not support nested translation\n", - iommu->name); - return -EINVAL; - } - /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) @@ -4919,185 +4912,6 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain, aux_domain_remove_dev(to_dmar_domain(domain), dev); } -#ifdef CONFIG_INTEL_IOMMU_SVM -/* - * 2D array for converting and sanitizing IOMMU generic TLB granularity to - * VT-d granularity. Invalidation is typically included in the unmap operation - * as a result of DMA or VFIO unmap. However, for assigned devices guest - * owns the first level page tables. Invalidations of translation caches in the - * guest are trapped and passed down to the host. - * - * vIOMMU in the guest will only expose first level page tables, therefore - * we do not support IOTLB granularity for request without PASID (second level). 
- * - * For example, to find the VT-d granularity encoding for IOTLB - * type and page selective granularity within PASID: - * X: indexed by iommu cache type - * Y: indexed by enum iommu_inv_granularity - * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR] - */ - -static const int -inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = { - /* - * PASID based IOTLB invalidation: PASID selective (per PASID), - * page selective (address granularity) - */ - {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID}, - /* PASID based dev TLBs */ - {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL}, - /* PASID cache */ - {-EINVAL, -EINVAL, -EINVAL} -}; - -static inline int to_vtd_granularity(int type, int granu) -{ - return inv_type_granu_table[type][granu]; -} - -static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules) -{ - u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT; - - /* VT-d size is encoded as 2^size of 4K pages, 0 for 4k, 9 for 2MB, etc. - * IOMMU cache invalidate API passes granu_size in bytes, and number of - * granu size in contiguous memory. - */ - return order_base_2(nr_pages); -} - -static int -intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, - struct iommu_cache_invalidate_info *inv_info) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - int cache_type; - u8 bus, devfn; - u16 did, sid; - int ret = 0; - u64 size = 0; - - if (!inv_info || !dmar_domain) - return -EINVAL; - - if (!dev || !dev_is_pci(dev)) - return -ENODEV; - - iommu = device_to_iommu(dev, &bus, &devfn); - if (!iommu) - return -ENODEV; - - if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE)) - return -EINVAL; - - spin_lock_irqsave(&device_domain_lock, flags); - spin_lock(&iommu->lock); - info = get_domain_info(dev); - if (!info) { - ret = -EINVAL; - goto out_unlock; - } - did = dmar_domain->iommu_did[iommu->seq_id]; - sid = PCI_DEVID(bus, devfn); - - /* Size is only valid in address selective invalidation */ - if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) - size = to_vtd_size(inv_info->granu.addr_info.granule_size, - inv_info->granu.addr_info.nb_granules); - - for_each_set_bit(cache_type, - (unsigned long *)&inv_info->cache, - IOMMU_CACHE_INV_TYPE_NR) { - int granu = 0; - u64 pasid = 0; - u64 addr = 0; - - granu = to_vtd_granularity(cache_type, inv_info->granularity); - if (granu == -EINVAL) { - pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n", - cache_type, inv_info->granularity); - break; - } - - /* - * PASID is stored in different locations based on the - * granularity. - */ - if (inv_info->granularity == IOMMU_INV_GRANU_PASID && - (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID)) - pasid = inv_info->granu.pasid_info.pasid; - else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR && - (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID)) - pasid = inv_info->granu.addr_info.pasid; - - switch (BIT(cache_type)) { - case IOMMU_CACHE_INV_TYPE_IOTLB: - /* HW will ignore LSB bits based on address mask */ - if (inv_info->granularity == IOMMU_INV_GRANU_ADDR && - size && - (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) { - pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n", - inv_info->granu.addr_info.addr, size); - } - - /* - * If granu is PASID-selective, address is ignored. - * We use npages = -1 to indicate that. 
- */ - qi_flush_piotlb(iommu, did, pasid, - mm_to_dma_pfn(inv_info->granu.addr_info.addr), - (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size, - inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF); - - if (!info->ats_enabled) - break; - /* - * Always flush device IOTLB if ATS is enabled. vIOMMU - * in the guest may assume IOTLB flush is inclusive, - * which is more efficient. - */ - fallthrough; - case IOMMU_CACHE_INV_TYPE_DEV_IOTLB: - /* - * PASID based device TLB invalidation does not support - * IOMMU_INV_GRANU_PASID granularity but only supports - * IOMMU_INV_GRANU_ADDR. - * The equivalent of that is we set the size to be the - * entire range of 64 bit. User only provides PASID info - * without address info. So we set addr to 0. - */ - if (inv_info->granularity == IOMMU_INV_GRANU_PASID) { - size = 64 - VTD_PAGE_SHIFT; - addr = 0; - } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) { - addr = inv_info->granu.addr_info.addr; - } - - if (info->ats_enabled) - qi_flush_dev_iotlb_pasid(iommu, sid, - info->pfsid, pasid, - info->ats_qdep, addr, - size); - else - pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n"); - break; - default: - dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n", - cache_type); - ret = -EINVAL; - } - } -out_unlock: - spin_unlock(&iommu->lock); - spin_unlock_irqrestore(&device_domain_lock, flags); - - return ret; -} -#endif - static int intel_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t hpa, size_t size, int iommu_prot, gfp_t gfp) @@ -5545,24 +5359,6 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, return attach_deferred(dev); } -static int -intel_iommu_enable_nesting(struct iommu_domain *domain) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - unsigned long flags; - int ret = -ENODEV; - - spin_lock_irqsave(&device_domain_lock, flags); - if (list_empty(&dmar_domain->devices)) { - dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE; - dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL; - ret = 0; - } - spin_unlock_irqrestore(&device_domain_lock, flags); - - return ret; -} - /* * Check that the device does not live on an external facing PCI port that is * marked as untrusted. 
Such devices should not be able to apply quirks and @@ -5599,7 +5395,6 @@ const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, .domain_free = intel_iommu_domain_free, - .enable_nesting = intel_iommu_enable_nesting, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, .aux_attach_dev = intel_iommu_aux_attach_device, @@ -5624,9 +5419,6 @@ const struct iommu_ops intel_iommu_ops = { .def_domain_type = device_def_domain_type, .pgsize_bitmap = SZ_4K, #ifdef CONFIG_INTEL_IOMMU_SVM - .cache_invalidate = intel_iommu_sva_invalidate, - .sva_bind_gpasid = intel_svm_bind_gpasid, - .sva_unbind_gpasid = intel_svm_unbind_gpasid, .sva_bind = intel_svm_bind, .sva_unbind = intel_svm_unbind, .sva_get_pasid = intel_svm_get_pasid, diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 07c390aed1fe..10fb82ea467d 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -762,164 +762,3 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, return 0; } - -static int -intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte, - struct iommu_gpasid_bind_data_vtd *pasid_data) -{ - /* - * Not all guest PASID table entry fields are passed down during bind, - * here we only set up the ones that are dependent on guest settings. - * Execution related bits such as NXE, SMEP are not supported. - * Other fields, such as snoop related, are set based on host needs - * regardless of guest settings. - */ - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) { - if (!ecap_srs(iommu->ecap)) { - pr_err_ratelimited("No supervisor request support on %s\n", - iommu->name); - return -EINVAL; - } - pasid_set_sre(pte); - /* Enable write protect WP if guest requested */ - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE) - pasid_set_wpe(pte); - } - - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) { - if (!ecap_eafs(iommu->ecap)) { - pr_err_ratelimited("No extended access flag support on %s\n", - iommu->name); - return -EINVAL; - } - pasid_set_eafe(pte); - } - - /* - * Memory type is only applicable to devices inside processor coherent - * domain. Will add MTS support once coherent devices are available. - */ - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) { - pr_warn_ratelimited("No memory type support %s\n", - iommu->name); - return -EINVAL; - } - - return 0; -} - -/** - * intel_pasid_setup_nested() - Set up PASID entry for nested translation. - * This could be used for guest shared virtual address. In this case, the - * first level page tables are used for GVA-GPA translation in the guest, - * second level page tables are used for GPA-HPA translation. 
- * - * @iommu: IOMMU which the device belong to - * @dev: Device to be set up for translation - * @gpgd: FLPTPTR: First Level Page translation pointer in GPA - * @pasid: PASID to be programmed in the device PASID table - * @pasid_data: Additional PASID info from the guest bind request - * @domain: Domain info for setting up second level page tables - * @addr_width: Address width of the first level (guest) - */ -int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev, - pgd_t *gpgd, u32 pasid, - struct iommu_gpasid_bind_data_vtd *pasid_data, - struct dmar_domain *domain, int addr_width) -{ - struct pasid_entry *pte; - struct dma_pte *pgd; - int ret = 0; - u64 pgd_val; - int agaw; - u16 did; - - if (!ecap_nest(iommu->ecap)) { - pr_err_ratelimited("IOMMU: %s: No nested translation support\n", - iommu->name); - return -EINVAL; - } - - if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) { - pr_err_ratelimited("Domain is not in nesting mode, %x\n", - domain->flags); - return -EINVAL; - } - - pte = intel_pasid_get_entry(dev, pasid); - if (WARN_ON(!pte)) - return -EINVAL; - - /* - * Caller must ensure PASID entry is not in use, i.e. not bind the - * same PASID to the same device twice. - */ - if (pasid_pte_is_present(pte)) - return -EBUSY; - - pasid_clear_entry(pte); - - /* Sanity checking performed by caller to make sure address - * width matching in two dimensions: - * 1. CPU vs. IOMMU - * 2. Guest vs. Host. - */ - switch (addr_width) { -#ifdef CONFIG_X86 - case ADDR_WIDTH_5LEVEL: - if (!cpu_feature_enabled(X86_FEATURE_LA57) || - !cap_5lp_support(iommu->cap)) { - dev_err_ratelimited(dev, - "5-level paging not supported\n"); - return -EINVAL; - } - - pasid_set_flpm(pte, 1); - break; -#endif - case ADDR_WIDTH_4LEVEL: - pasid_set_flpm(pte, 0); - break; - default: - dev_err_ratelimited(dev, "Invalid guest address width %d\n", - addr_width); - return -EINVAL; - } - - /* First level PGD is in GPA, must be supported by the second level */ - if ((uintptr_t)gpgd > domain->max_addr) { - dev_err_ratelimited(dev, - "Guest PGD %lx not supported, max %llx\n", - (uintptr_t)gpgd, domain->max_addr); - return -EINVAL; - } - pasid_set_flptr(pte, (uintptr_t)gpgd); - - ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data); - if (ret) - return ret; - - /* Setup the second level based on the given domain */ - pgd = domain->pgd; - - agaw = iommu_skip_agaw(domain, iommu, &pgd); - if (agaw < 0) { - dev_err_ratelimited(dev, "Invalid domain page table\n"); - return -EINVAL; - } - pgd_val = virt_to_phys(pgd); - pasid_set_slptr(pte, pgd_val); - pasid_set_fault_enable(pte); - - did = domain->iommu_did[iommu->seq_id]; - pasid_set_domain_id(pte, did); - - pasid_set_address_width(pte, agaw); - pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); - - pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED); - pasid_set_present(pte); - pasid_flush_caches(iommu, pte, pasid, did); - - return ret; -} diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index d5552e2c160d..ab4408c824a5 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -118,10 +118,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, int intel_pasid_setup_pass_through(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); -int intel_pasid_setup_nested(struct intel_iommu *iommu, - struct device *dev, pgd_t *pgd, u32 pasid, - struct iommu_gpasid_bind_data_vtd *pasid_data, - struct dmar_domain *domain, int addr_width); void intel_pasid_tear_down_entry(struct 
intel_iommu *iommu, struct device *dev, u32 pasid, bool fault_ignore); diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 5b5d69b04fcc..d04c83dd3a58 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -318,193 +318,6 @@ out: return 0; } -int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, - struct iommu_gpasid_bind_data *data) -{ - struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); - struct intel_svm_dev *sdev = NULL; - struct dmar_domain *dmar_domain; - struct device_domain_info *info; - struct intel_svm *svm = NULL; - unsigned long iflags; - int ret = 0; - - if (WARN_ON(!iommu) || !data) - return -EINVAL; - - if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD) - return -EINVAL; - - /* IOMMU core ensures argsz is more than the start of the union */ - if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd)) - return -EINVAL; - - /* Make sure no undefined flags are used in vendor data */ - if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1)) - return -EINVAL; - - if (!dev_is_pci(dev)) - return -ENOTSUPP; - - /* VT-d supports devices with full 20 bit PASIDs only */ - if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX) - return -EINVAL; - - /* - * We only check host PASID range, we have no knowledge to check - * guest PASID range. - */ - if (data->hpasid <= 0 || data->hpasid >= PASID_MAX) - return -EINVAL; - - info = get_domain_info(dev); - if (!info) - return -EINVAL; - - dmar_domain = to_dmar_domain(domain); - - mutex_lock(&pasid_mutex); - ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev); - if (ret) - goto out; - - if (sdev) { - /* - * Do not allow multiple bindings of the same device-PASID since - * there is only one SL page tables per PASID. We may revisit - * once sharing PGD across domains are supported. - */ - dev_warn_ratelimited(dev, "Already bound with PASID %u\n", - svm->pasid); - ret = -EBUSY; - goto out; - } - - if (!svm) { - /* We come here when PASID has never been bond to a device. */ - svm = kzalloc(sizeof(*svm), GFP_KERNEL); - if (!svm) { - ret = -ENOMEM; - goto out; - } - /* REVISIT: upper layer/VFIO can track host process that bind - * the PASID. ioasid_set = mm might be sufficient for vfio to - * check pasid VMM ownership. We can drop the following line - * once VFIO and IOASID set check is in place. - */ - svm->mm = get_task_mm(current); - svm->pasid = data->hpasid; - if (data->flags & IOMMU_SVA_GPASID_VAL) { - svm->gpasid = data->gpasid; - svm->flags |= SVM_FLAG_GUEST_PASID; - } - pasid_private_add(data->hpasid, svm); - INIT_LIST_HEAD_RCU(&svm->devs); - mmput(svm->mm); - } - sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); - if (!sdev) { - ret = -ENOMEM; - goto out; - } - sdev->dev = dev; - sdev->sid = PCI_DEVID(info->bus, info->devfn); - sdev->iommu = iommu; - - /* Only count users if device has aux domains */ - if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX)) - sdev->users = 1; - - /* Set up device context entry for PASID if not enabled already */ - ret = intel_iommu_enable_pasid(iommu, sdev->dev); - if (ret) { - dev_err_ratelimited(dev, "Failed to enable PASID capability\n"); - kfree(sdev); - goto out; - } - - /* - * PASID table is per device for better security. Therefore, for - * each bind of a new device even with an existing PASID, we need to - * call the nested mode setup function here. 
- */ - spin_lock_irqsave(&iommu->lock, iflags); - ret = intel_pasid_setup_nested(iommu, dev, - (pgd_t *)(uintptr_t)data->gpgd, - data->hpasid, &data->vendor.vtd, dmar_domain, - data->addr_width); - spin_unlock_irqrestore(&iommu->lock, iflags); - if (ret) { - dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n", - data->hpasid, ret); - /* - * PASID entry should be in cleared state if nested mode - * set up failed. So we only need to clear IOASID tracking - * data such that free call will succeed. - */ - kfree(sdev); - goto out; - } - - svm->flags |= SVM_FLAG_GUEST_MODE; - - init_rcu_head(&sdev->rcu); - list_add_rcu(&sdev->list, &svm->devs); - out: - if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) { - pasid_private_remove(data->hpasid); - kfree(svm); - } - - mutex_unlock(&pasid_mutex); - return ret; -} - -int intel_svm_unbind_gpasid(struct device *dev, u32 pasid) -{ - struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); - struct intel_svm_dev *sdev; - struct intel_svm *svm; - int ret; - - if (WARN_ON(!iommu)) - return -EINVAL; - - mutex_lock(&pasid_mutex); - ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev); - if (ret) - goto out; - - if (sdev) { - if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX)) - sdev->users--; - if (!sdev->users) { - list_del_rcu(&sdev->list); - intel_pasid_tear_down_entry(iommu, dev, - svm->pasid, false); - intel_svm_drain_prq(dev, svm->pasid); - kfree_rcu(sdev, rcu); - - if (list_empty(&svm->devs)) { - /* - * We do not free the IOASID here in that - * IOMMU driver did not allocate it. - * Unlike native SVM, IOASID for guest use was - * allocated prior to the bind call. - * In any case, if the free call comes before - * the unbind, IOMMU driver will get notified - * and perform cleanup. - */ - pasid_private_remove(pasid); - kfree(svm); - } - } - } -out: - mutex_unlock(&pasid_mutex); - return ret; -} - static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm, unsigned int flags) { @@ -1125,28 +938,6 @@ int intel_svm_page_response(struct device *dev, goto out; } - /* - * For responses from userspace, need to make sure that the - * pasid has been bound to its mm. - */ - if (svm->flags & SVM_FLAG_GUEST_MODE) { - struct mm_struct *mm; - - mm = get_task_mm(current); - if (!mm) { - ret = -EINVAL; - goto out; - } - - if (mm != svm->mm) { - ret = -ENODEV; - mmput(mm); - goto out; - } - - mmput(mm); - } - /* * Per VT-d spec. v3.0 ch7.7, system software must respond * with page group response if private data is present (PDP) diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 69230fd695ea..beaacb589abd 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -525,12 +525,6 @@ struct context_entry { */ #define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1) -/* - * Domain represents a virtual machine which demands iommu nested - * translation mode support. 
- */ -#define DOMAIN_FLAG_NESTING_MODE BIT(2) - struct dmar_domain { int nid; /* node id */ @@ -765,9 +759,6 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); extern void intel_svm_check(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu); -int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, - struct iommu_gpasid_bind_data *data); -int intel_svm_unbind_gpasid(struct device *dev, u32 pasid); struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata); void intel_svm_unbind(struct iommu_sva *handle); @@ -795,7 +786,6 @@ struct intel_svm { unsigned int flags; u32 pasid; - int gpasid; /* In case that guest PASID is different from host PASID */ struct list_head devs; }; #else diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 1b73bab7eeff..b3b125b332aa 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -25,17 +25,5 @@ * do such IOTLB flushes automatically. */ #define SVM_FLAG_SUPERVISOR_MODE BIT(0) -/* - * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest - * processes. Compared to the host bind, the primary differences are: - * 1. mm life cycle management - * 2. fault reporting - */ -#define SVM_FLAG_GUEST_MODE BIT(1) -/* - * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space, - * which requires guest and host PASID translation at both directions. - */ -#define SVM_FLAG_GUEST_PASID BIT(2) #endif /* __INTEL_SVM_H__ */ -- cgit v1.2.3-71-gd317 From 0c9f178778919bb500443f340647b44227778fb2 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:42 +0800 Subject: iommu: Remove guest pasid related interfaces and definitions The guest pasid related uapi interfaces and definitions are not referenced anywhere in the tree. We've also reached a consensus to replace them with a new iommufd design. Remove them to avoid dead code. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-3-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 210 --------------------------------------------- include/linux/iommu.h | 44 ---------- include/uapi/linux/iommu.h | 181 -------------------------------------- 3 files changed, 435 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 107dcf5938d6..3cbf4781e5bd 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2031,216 +2031,6 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) return 0; } -/* - * Check flags and other user provided data for valid combinations. We also - * make sure no reserved fields or unused flags are set. This is to ensure - * not breaking userspace in the future when these fields or flags are used. 
- */ -static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info) -{ - u32 mask; - int i; - - if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1) - return -EINVAL; - - mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1; - if (info->cache & ~mask) - return -EINVAL; - - if (info->granularity >= IOMMU_INV_GRANU_NR) - return -EINVAL; - - switch (info->granularity) { - case IOMMU_INV_GRANU_ADDR: - if (info->cache & IOMMU_CACHE_INV_TYPE_PASID) - return -EINVAL; - - mask = IOMMU_INV_ADDR_FLAGS_PASID | - IOMMU_INV_ADDR_FLAGS_ARCHID | - IOMMU_INV_ADDR_FLAGS_LEAF; - - if (info->granu.addr_info.flags & ~mask) - return -EINVAL; - break; - case IOMMU_INV_GRANU_PASID: - mask = IOMMU_INV_PASID_FLAGS_PASID | - IOMMU_INV_PASID_FLAGS_ARCHID; - if (info->granu.pasid_info.flags & ~mask) - return -EINVAL; - - break; - case IOMMU_INV_GRANU_DOMAIN: - if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) - return -EINVAL; - break; - default: - return -EINVAL; - } - - /* Check reserved padding fields */ - for (i = 0; i < sizeof(info->padding); i++) { - if (info->padding[i]) - return -EINVAL; - } - - return 0; -} - -int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, - void __user *uinfo) -{ - struct iommu_cache_invalidate_info inv_info = { 0 }; - u32 minsz; - int ret; - - if (unlikely(!domain->ops->cache_invalidate)) - return -ENODEV; - - /* - * No new spaces can be added before the variable sized union, the - * minimum size is the offset to the union. - */ - minsz = offsetof(struct iommu_cache_invalidate_info, granu); - - /* Copy minsz from user to get flags and argsz */ - if (copy_from_user(&inv_info, uinfo, minsz)) - return -EFAULT; - - /* Fields before the variable size union are mandatory */ - if (inv_info.argsz < minsz) - return -EINVAL; - - /* PASID and address granu require additional info beyond minsz */ - if (inv_info.granularity == IOMMU_INV_GRANU_PASID && - inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info)) - return -EINVAL; - - if (inv_info.granularity == IOMMU_INV_GRANU_ADDR && - inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info)) - return -EINVAL; - - /* - * User might be using a newer UAPI header which has a larger data - * size, we shall support the existing flags within the current - * size. Copy the remaining user data _after_ minsz but not more - * than the current kernel supported size. 
- */ - if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz, - min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz)) - return -EFAULT; - - /* Now the argsz is validated, check the content */ - ret = iommu_check_cache_invl_data(&inv_info); - if (ret) - return ret; - - return domain->ops->cache_invalidate(domain, dev, &inv_info); -} -EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate); - -static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) -{ - u64 mask; - int i; - - if (data->version != IOMMU_GPASID_BIND_VERSION_1) - return -EINVAL; - - /* Check the range of supported formats */ - if (data->format >= IOMMU_PASID_FORMAT_LAST) - return -EINVAL; - - /* Check all flags */ - mask = IOMMU_SVA_GPASID_VAL; - if (data->flags & ~mask) - return -EINVAL; - - /* Check reserved padding fields */ - for (i = 0; i < sizeof(data->padding); i++) { - if (data->padding[i]) - return -EINVAL; - } - - return 0; -} - -static int iommu_sva_prepare_bind_data(void __user *udata, - struct iommu_gpasid_bind_data *data) -{ - u32 minsz; - - /* - * No new spaces can be added before the variable sized union, the - * minimum size is the offset to the union. - */ - minsz = offsetof(struct iommu_gpasid_bind_data, vendor); - - /* Copy minsz from user to get flags and argsz */ - if (copy_from_user(data, udata, minsz)) - return -EFAULT; - - /* Fields before the variable size union are mandatory */ - if (data->argsz < minsz) - return -EINVAL; - /* - * User might be using a newer UAPI header, we shall let IOMMU vendor - * driver decide on what size it needs. Since the guest PASID bind data - * can be vendor specific, larger argsz could be the result of extension - * for one vendor but it should not affect another vendor. - * Copy the remaining user data _after_ minsz - */ - if (copy_from_user((void *)data + minsz, udata + minsz, - min_t(u32, data->argsz, sizeof(*data)) - minsz)) - return -EFAULT; - - return iommu_check_bind_data(data); -} - -int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, - void __user *udata) -{ - struct iommu_gpasid_bind_data data = { 0 }; - int ret; - - if (unlikely(!domain->ops->sva_bind_gpasid)) - return -ENODEV; - - ret = iommu_sva_prepare_bind_data(udata, &data); - if (ret) - return ret; - - return domain->ops->sva_bind_gpasid(domain, dev, &data); -} -EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid); - -int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, - ioasid_t pasid) -{ - if (unlikely(!domain->ops->sva_unbind_gpasid)) - return -ENODEV; - - return domain->ops->sva_unbind_gpasid(dev, pasid); -} -EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); - -int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, - void __user *udata) -{ - struct iommu_gpasid_bind_data data = { 0 }; - int ret; - - if (unlikely(!domain->ops->sva_bind_gpasid)) - return -ENODEV; - - ret = iommu_sva_prepare_bind_data(udata, &data); - if (ret) - return ret; - - return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); -} -EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid); - static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index de0c57a567c8..cde1d0aad60f 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -229,9 +229,6 @@ struct iommu_iotlb_gather { * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle * @page_response: handle page request response - * @cache_invalidate: invalidate 
translation caches - * @sva_bind_gpasid: bind guest pasid and mm - * @sva_unbind_gpasid: unbind guest pasid and mm * @def_domain_type: device default domain type, return value: * - IOMMU_DOMAIN_IDENTITY: must use an identity domain * - IOMMU_DOMAIN_DMA: must use a dma domain @@ -301,12 +298,6 @@ struct iommu_ops { int (*page_response)(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); - int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev, - struct iommu_cache_invalidate_info *inv_info); - int (*sva_bind_gpasid)(struct iommu_domain *domain, - struct device *dev, struct iommu_gpasid_bind_data *data); - - int (*sva_unbind_gpasid)(struct device *dev, u32 pasid); int (*def_domain_type)(struct device *dev); @@ -421,14 +412,6 @@ extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); -extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - void __user *uinfo); - -extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); -extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); @@ -1051,33 +1034,6 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) return IOMMU_PASID_INVALID; } -static inline int -iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - struct iommu_cache_invalidate_info *inv_info) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, - ioasid_t pasid) -{ - return -ENODEV; -} - static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) { return NULL; diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h index 59178fc229ca..65d8b0234f69 100644 --- a/include/uapi/linux/iommu.h +++ b/include/uapi/linux/iommu.h @@ -158,185 +158,4 @@ struct iommu_page_response { __u32 code; }; -/* defines the granularity of the invalidation */ -enum iommu_inv_granularity { - IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */ - IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */ - IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */ - IOMMU_INV_GRANU_NR, /* number of invalidation granularities */ -}; - -/** - * struct iommu_inv_addr_info - Address Selective Invalidation Structure - * - * @flags: indicates the granularity of the address-selective invalidation - * - If the PASID bit is set, the @pasid field is populated and the invalidation - * relates to cache entries tagged with this PASID and matching the address - * range. - * - If ARCHID bit is set, @archid is populated and the invalidation relates - * to cache entries tagged with this architecture specific ID and matching - * the address range. - * - Both PASID and ARCHID can be set as they may tag different caches. - * - If neither PASID or ARCHID is set, global addr invalidation applies. 
- * - The LEAF flag indicates whether only the leaf PTE caching needs to be - * invalidated and other paging structure caches can be preserved. - * @pasid: process address space ID - * @archid: architecture-specific ID - * @addr: first stage/level input address - * @granule_size: page/block size of the mapping in bytes - * @nb_granules: number of contiguous granules to be invalidated - */ -struct iommu_inv_addr_info { -#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0) -#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1) -#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2) - __u32 flags; - __u32 archid; - __u64 pasid; - __u64 addr; - __u64 granule_size; - __u64 nb_granules; -}; - -/** - * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure - * - * @flags: indicates the granularity of the PASID-selective invalidation - * - If the PASID bit is set, the @pasid field is populated and the invalidation - * relates to cache entries tagged with this PASID and matching the address - * range. - * - If the ARCHID bit is set, the @archid is populated and the invalidation - * relates to cache entries tagged with this architecture specific ID and - * matching the address range. - * - Both PASID and ARCHID can be set as they may tag different caches. - * - At least one of PASID or ARCHID must be set. - * @pasid: process address space ID - * @archid: architecture-specific ID - */ -struct iommu_inv_pasid_info { -#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0) -#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1) - __u32 flags; - __u32 archid; - __u64 pasid; -}; - -/** - * struct iommu_cache_invalidate_info - First level/stage invalidation - * information - * @argsz: User filled size of this data - * @version: API version of this structure - * @cache: bitfield that allows to select which caches to invalidate - * @granularity: defines the lowest granularity used for the invalidation: - * domain > PASID > addr - * @padding: reserved for future use (should be zero) - * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID - * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR - * - * Not all the combinations of cache/granularity are valid: - * - * +--------------+---------------+---------------+---------------+ - * | type / | DEV_IOTLB | IOTLB | PASID | - * | granularity | | | cache | - * +==============+===============+===============+===============+ - * | DOMAIN | N/A | Y | Y | - * +--------------+---------------+---------------+---------------+ - * | PASID | Y | Y | Y | - * +--------------+---------------+---------------+---------------+ - * | ADDR | Y | Y | N/A | - * +--------------+---------------+---------------+---------------+ - * - * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than - * @version and @cache. - * - * If multiple cache types are invalidated simultaneously, they all - * must support the used granularity. 
- */ -struct iommu_cache_invalidate_info { - __u32 argsz; -#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1 - __u32 version; -/* IOMMU paging structure cache */ -#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */ -#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */ -#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */ -#define IOMMU_CACHE_INV_TYPE_NR (3) - __u8 cache; - __u8 granularity; - __u8 padding[6]; - union { - struct iommu_inv_pasid_info pasid_info; - struct iommu_inv_addr_info addr_info; - } granu; -}; - -/** - * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest - * SVA binding. - * - * @flags: VT-d PASID table entry attributes - * @pat: Page attribute table data to compute effective memory type - * @emt: Extended memory type - * - * Only guest vIOMMU selectable and effective options are passed down to - * the host IOMMU. - */ -struct iommu_gpasid_bind_data_vtd { -#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */ -#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */ -#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */ -#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */ -#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */ -#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */ -#define IOMMU_SVA_VTD_GPASID_WPE (1 << 6) /* Write protect enable */ -#define IOMMU_SVA_VTD_GPASID_LAST (1 << 7) - __u64 flags; - __u32 pat; - __u32 emt; -}; - -#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \ - IOMMU_SVA_VTD_GPASID_EMTE | \ - IOMMU_SVA_VTD_GPASID_PCD | \ - IOMMU_SVA_VTD_GPASID_PWT) - -/** - * struct iommu_gpasid_bind_data - Information about device and guest PASID binding - * @argsz: User filled size of this data - * @version: Version of this data structure - * @format: PASID table entry format - * @flags: Additional information on guest bind request - * @gpgd: Guest page directory base of the guest mm to bind - * @hpasid: Process address space ID used for the guest mm in host IOMMU - * @gpasid: Process address space ID used for the guest mm in guest IOMMU - * @addr_width: Guest virtual address width - * @padding: Reserved for future use (should be zero) - * @vtd: Intel VT-d specific data - * - * Guest to host PASID mapping can be an identity or non-identity, where guest - * has its own PASID space. For non-identify mapping, guest to host PASID lookup - * is needed when VM programs guest PASID into an assigned device. VMM may - * trap such PASID programming then request host IOMMU driver to convert guest - * PASID to host PASID based on this bind data. - */ -struct iommu_gpasid_bind_data { - __u32 argsz; -#define IOMMU_GPASID_BIND_VERSION_1 1 - __u32 version; -#define IOMMU_PASID_FORMAT_INTEL_VTD 1 -#define IOMMU_PASID_FORMAT_LAST 2 - __u32 format; - __u32 addr_width; -#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */ - __u64 flags; - __u64 gpgd; - __u64 hpasid; - __u64 gpasid; - __u8 padding[8]; - /* Vendor specific data */ - union { - struct iommu_gpasid_bind_data_vtd vtd; - } vendor; -}; - #endif /* _UAPI_IOMMU_H */ -- cgit v1.2.3-71-gd317 From 241469685d8d54075f12a6f3eb0628f85e13ff37 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:43 +0800 Subject: iommu/vt-d: Remove aux-domain related callbacks The aux-domain related callbacks are not called in the tree. Remove them to avoid dead code. 
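For context, the call flow these callbacks were meant to serve looks roughly like the sketch below. It is hypothetical by construction — no such in-tree caller exists, which is exactly why the callbacks are dead code — and it uses the consumer-side helpers (iommu_aux_attach_device() and friends) that a later patch in this series removes as well. Error unwinding is elided for brevity.

#include <linux/iommu.h>

/*
 * Hypothetical aux-domain consumer: give one subdevice of a physical
 * device its own UNMANAGED domain, tagged by a PASID. No in-tree driver
 * ever did this, so the backing callbacks are being removed.
 */
static int example_bind_subdevice(struct device *dev)
{
	struct iommu_domain *domain;
	int pasid;

	/* Opt the physical device in to aux-domain usage. */
	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	/* iommu_domain_alloc() hands back an IOMMU_DOMAIN_UNMANAGED domain. */
	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	/* This would have landed in intel_iommu_aux_attach_device(). */
	if (iommu_aux_attach_device(domain, dev))
		return -ENODEV;

	/* The PASID tagging DMA for this subdevice (the default_pasid). */
	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0)
		return pasid;

	/* ... program the device to emit this PASID, then map IOVAs ... */
	return 0;
}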
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-4-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/debugfs.c | 3 +- drivers/iommu/intel/iommu.c | 309 +----------------------------------------- include/linux/intel-iommu.h | 17 --- 3 files changed, 3 insertions(+), 326 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c index 62e23ff3c987..db7a0ca73626 100644 --- a/drivers/iommu/intel/debugfs.c +++ b/drivers/iommu/intel/debugfs.c @@ -351,8 +351,7 @@ static int show_device_domain_translation(struct device *dev, void *data) if (!domain) return 0; - seq_printf(m, "Device %s with pasid %d @0x%llx\n", - dev_name(dev), domain->default_pasid, + seq_printf(m, "Device %s @0x%llx\n", dev_name(dev), (u64)virt_to_phys(domain->pgd)); seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n"); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 4f9d95067c8f..2b5f4e57a8bb 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1573,18 +1573,6 @@ static void domain_update_iotlb(struct dmar_domain *domain) break; } - if (!has_iotlb_device) { - struct subdev_domain_info *sinfo; - - list_for_each_entry(sinfo, &domain->subdevices, link_domain) { - info = get_domain_info(sinfo->pdev); - if (info && info->ats_enabled) { - has_iotlb_device = true; - break; - } - } - } - domain->has_iotlb_device = has_iotlb_device; } @@ -1682,7 +1670,6 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, { unsigned long flags; struct device_domain_info *info; - struct subdev_domain_info *sinfo; if (!domain->has_iotlb_device) return; @@ -1691,27 +1678,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, list_for_each_entry(info, &domain->devices, link) __iommu_flush_dev_iotlb(info, addr, mask); - list_for_each_entry(sinfo, &domain->subdevices, link_domain) { - info = get_domain_info(sinfo->pdev); - __iommu_flush_dev_iotlb(info, addr, mask); - } spin_unlock_irqrestore(&device_domain_lock, flags); } -static void domain_flush_piotlb(struct intel_iommu *iommu, - struct dmar_domain *domain, - u64 addr, unsigned long npages, bool ih) -{ - u16 did = domain->iommu_did[iommu->seq_id]; - - if (domain->default_pasid) - qi_flush_piotlb(iommu, did, domain->default_pasid, - addr, npages, ih); - - if (!list_empty(&domain->devices)) - qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih); -} - static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, struct dmar_domain *domain, unsigned long pfn, unsigned int pages, @@ -1727,7 +1696,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, ih = 1 << 6; if (domain_use_first_level(domain)) { - domain_flush_piotlb(iommu, domain, addr, pages, ih); + qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih); } else { /* * Fallback to domain selective flush if no PSI support or @@ -1776,7 +1745,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain) u16 did = dmar_domain->iommu_did[iommu->seq_id]; if (domain_use_first_level(dmar_domain)) - domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0); + qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0); else iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); @@ -1983,7 +1952,6 @@ static struct dmar_domain *alloc_domain(unsigned int type) domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL; domain->has_iotlb_device = false; 
INIT_LIST_HEAD(&domain->devices); - INIT_LIST_HEAD(&domain->subdevices); return domain; } @@ -2676,8 +2644,6 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, info->domain = domain; info->iommu = iommu; info->pasid_table = NULL; - info->auxd_enabled = 0; - INIT_LIST_HEAD(&info->subdevices); if (dev && dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(info->dev); @@ -4637,183 +4603,6 @@ static void intel_iommu_domain_free(struct iommu_domain *domain) domain_exit(to_dmar_domain(domain)); } -/* - * Check whether a @domain could be attached to the @dev through the - * aux-domain attach/detach APIs. - */ -static inline bool -is_aux_domain(struct device *dev, struct iommu_domain *domain) -{ - struct device_domain_info *info = get_domain_info(dev); - - return info && info->auxd_enabled && - domain->type == IOMMU_DOMAIN_UNMANAGED; -} - -static inline struct subdev_domain_info * -lookup_subdev_info(struct dmar_domain *domain, struct device *dev) -{ - struct subdev_domain_info *sinfo; - - if (!list_empty(&domain->subdevices)) { - list_for_each_entry(sinfo, &domain->subdevices, link_domain) { - if (sinfo->pdev == dev) - return sinfo; - } - } - - return NULL; -} - -static int auxiliary_link_device(struct dmar_domain *domain, - struct device *dev) -{ - struct device_domain_info *info = get_domain_info(dev); - struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev); - - assert_spin_locked(&device_domain_lock); - if (WARN_ON(!info)) - return -EINVAL; - - if (!sinfo) { - sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC); - if (!sinfo) - return -ENOMEM; - sinfo->domain = domain; - sinfo->pdev = dev; - list_add(&sinfo->link_phys, &info->subdevices); - list_add(&sinfo->link_domain, &domain->subdevices); - } - - return ++sinfo->users; -} - -static int auxiliary_unlink_device(struct dmar_domain *domain, - struct device *dev) -{ - struct device_domain_info *info = get_domain_info(dev); - struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev); - int ret; - - assert_spin_locked(&device_domain_lock); - if (WARN_ON(!info || !sinfo || sinfo->users <= 0)) - return -EINVAL; - - ret = --sinfo->users; - if (!ret) { - list_del(&sinfo->link_phys); - list_del(&sinfo->link_domain); - kfree(sinfo); - } - - return ret; -} - -static int aux_domain_add_dev(struct dmar_domain *domain, - struct device *dev) -{ - int ret; - unsigned long flags; - struct intel_iommu *iommu; - - iommu = device_to_iommu(dev, NULL, NULL); - if (!iommu) - return -ENODEV; - - if (domain->default_pasid <= 0) { - u32 pasid; - - /* No private data needed for the default pasid */ - pasid = ioasid_alloc(NULL, PASID_MIN, - pci_max_pasids(to_pci_dev(dev)) - 1, - NULL); - if (pasid == INVALID_IOASID) { - pr_err("Can't allocate default pasid\n"); - return -ENODEV; - } - domain->default_pasid = pasid; - } - - spin_lock_irqsave(&device_domain_lock, flags); - ret = auxiliary_link_device(domain, dev); - if (ret <= 0) - goto link_failed; - - /* - * Subdevices from the same physical device can be attached to the - * same domain. For such cases, only the first subdevice attachment - * needs to go through the full steps in this function. So if ret > - * 1, just goto out. - */ - if (ret > 1) - goto out; - - /* - * iommu->lock must be held to attach domain to iommu and setup the - * pasid entry for second level translation. 
- */ - spin_lock(&iommu->lock); - ret = domain_attach_iommu(domain, iommu); - if (ret) - goto attach_failed; - - /* Setup the PASID entry for mediated devices: */ - if (domain_use_first_level(domain)) - ret = domain_setup_first_level(iommu, domain, dev, - domain->default_pasid); - else - ret = intel_pasid_setup_second_level(iommu, domain, dev, - domain->default_pasid); - if (ret) - goto table_failed; - - spin_unlock(&iommu->lock); -out: - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; - -table_failed: - domain_detach_iommu(domain, iommu); -attach_failed: - spin_unlock(&iommu->lock); - auxiliary_unlink_device(domain, dev); -link_failed: - spin_unlock_irqrestore(&device_domain_lock, flags); - if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); - - return ret; -} - -static void aux_domain_remove_dev(struct dmar_domain *domain, - struct device *dev) -{ - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - - if (!is_aux_domain(dev, &domain->domain)) - return; - - spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); - iommu = info->iommu; - - if (!auxiliary_unlink_device(domain, dev)) { - spin_lock(&iommu->lock); - intel_pasid_tear_down_entry(iommu, dev, - domain->default_pasid, false); - domain_detach_iommu(domain, iommu); - spin_unlock(&iommu->lock); - } - - spin_unlock_irqrestore(&device_domain_lock, flags); - - if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); -} - static int prepare_domain_attach_device(struct iommu_domain *domain, struct device *dev) { @@ -4866,9 +4655,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, return -EPERM; } - if (is_aux_domain(dev, domain)) - return -EPERM; - /* normally dev is not mapped */ if (unlikely(domain_context_mapped(dev))) { struct dmar_domain *old_domain; @@ -4885,33 +4671,12 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, return domain_add_dev_info(to_dmar_domain(domain), dev); } -static int intel_iommu_aux_attach_device(struct iommu_domain *domain, - struct device *dev) -{ - int ret; - - if (!is_aux_domain(dev, domain)) - return -EPERM; - - ret = prepare_domain_attach_device(domain, dev); - if (ret) - return ret; - - return aux_domain_add_dev(to_dmar_domain(domain), dev); -} - static void intel_iommu_detach_device(struct iommu_domain *domain, struct device *dev) { dmar_remove_one_dev_info(dev); } -static void intel_iommu_aux_detach_device(struct iommu_domain *domain, - struct device *dev) -{ - aux_domain_remove_dev(to_dmar_domain(domain), dev); -} - static int intel_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t hpa, size_t size, int iommu_prot, gfp_t gfp) @@ -5205,46 +4970,6 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev) return generic_device_group(dev); } -static int intel_iommu_enable_auxd(struct device *dev) -{ - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - int ret; - - iommu = device_to_iommu(dev, NULL, NULL); - if (!iommu || dmar_disabled) - return -EINVAL; - - if (!sm_supported(iommu) || !pasid_supported(iommu)) - return -EINVAL; - - ret = intel_iommu_enable_pasid(iommu, dev); - if (ret) - return -ENODEV; - - spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); - info->auxd_enabled = 1; - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; -} - -static int intel_iommu_disable_auxd(struct 
device *dev) -{ - struct device_domain_info *info; - unsigned long flags; - - spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); - if (!WARN_ON(!info)) - info->auxd_enabled = 0; - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; -} - static int intel_iommu_enable_sva(struct device *dev) { struct device_domain_info *info = get_domain_info(dev); @@ -5301,9 +5026,6 @@ static int intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat) { switch (feat) { - case IOMMU_DEV_FEAT_AUX: - return intel_iommu_enable_auxd(dev); - case IOMMU_DEV_FEAT_IOPF: return intel_iommu_enable_iopf(dev); @@ -5319,9 +5041,6 @@ static int intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) { switch (feat) { - case IOMMU_DEV_FEAT_AUX: - return intel_iommu_disable_auxd(dev); - case IOMMU_DEV_FEAT_IOPF: return 0; @@ -5333,26 +5052,6 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) } } -static bool -intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat) -{ - struct device_domain_info *info = get_domain_info(dev); - - if (feat == IOMMU_DEV_FEAT_AUX) - return scalable_mode_support() && info && info->auxd_enabled; - - return false; -} - -static int -intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - - return dmar_domain->default_pasid > 0 ? - dmar_domain->default_pasid : -EINVAL; -} - static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, struct device *dev) { @@ -5397,9 +5096,6 @@ const struct iommu_ops intel_iommu_ops = { .domain_free = intel_iommu_domain_free, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, - .aux_attach_dev = intel_iommu_aux_attach_device, - .aux_detach_dev = intel_iommu_aux_detach_device, - .aux_get_pasid = intel_iommu_aux_get_pasid, .map_pages = intel_iommu_map_pages, .unmap_pages = intel_iommu_unmap_pages, .iotlb_sync_map = intel_iommu_iotlb_sync_map, @@ -5412,7 +5108,6 @@ const struct iommu_ops intel_iommu_ops = { .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .device_group = intel_iommu_device_group, - .dev_feat_enabled = intel_iommu_dev_feat_enabled, .dev_enable_feat = intel_iommu_dev_enable_feat, .dev_disable_feat = intel_iommu_dev_disable_feat, .is_attach_deferred = intel_iommu_is_attach_deferred, diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index beaacb589abd..5cfda90b2cca 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -542,7 +542,6 @@ struct dmar_domain { u8 iommu_snooping: 1; /* indicate snooping control feature */ struct list_head devices; /* all devices' list */ - struct list_head subdevices; /* all subdevices' list */ struct iova_domain iovad; /* iova's that belong to this domain */ struct dma_pte *pgd; /* virtual address */ @@ -557,11 +556,6 @@ struct dmar_domain { 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ u64 max_addr; /* maximum mapped address */ - u32 default_pasid; /* - * The default pasid used for non-SVM - * traffic on mediated devices. 
- */ - struct iommu_domain domain; /* generic domain data structure for iommu core */ }; @@ -614,21 +608,11 @@ struct intel_iommu { void *perf_statistic; }; -/* Per subdevice private data */ -struct subdev_domain_info { - struct list_head link_phys; /* link to phys device siblings */ - struct list_head link_domain; /* link to domain siblings */ - struct device *pdev; /* physical device derived from */ - struct dmar_domain *domain; /* aux-domain */ - int users; /* user count */ -}; - /* PCI domain-device relationship */ struct device_domain_info { struct list_head link; /* link to domain siblings */ struct list_head global; /* link to global list */ struct list_head table; /* link to pasid table */ - struct list_head subdevices; /* subdevices sibling */ u32 segment; /* PCI segment number */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ @@ -639,7 +623,6 @@ struct device_domain_info { u8 pri_enabled:1; u8 ats_supported:1; u8 ats_enabled:1; - u8 auxd_enabled:1; /* Multiple domains per device */ u8 ats_qdep; struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ struct intel_iommu *iommu; /* IOMMU used by this device */ -- cgit v1.2.3-71-gd317 From 8652d875939b6a1fe6d5c93cf4fb91df61cda5a7 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:44 +0800 Subject: iommu: Remove aux-domain related interfaces and iommu_ops The aux-domain related interfaces and iommu_ops are not referenced anywhere in the tree. We've also reached a consensus to redesign it based the new iommufd framework. Remove them to avoid dead code. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-5-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 46 ---------------------------------------------- include/linux/iommu.h | 29 ----------------------------- 2 files changed, 75 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3cbf4781e5bd..0ebaf561a70e 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2749,8 +2749,6 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); /* * The device drivers should do the necessary cleanups before calling this. - * For example, before disabling the aux-domain feature, the device driver - * should detach all aux-domains. Otherwise, this will return -EBUSY. */ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) { @@ -2778,50 +2776,6 @@ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) } EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled); -/* - * Aux-domain specific attach/detach. - * - * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns - * true. Also, as long as domains are attached to a device through this - * interface, any tries to call iommu_attach_device() should fail - * (iommu_detach_device() can't fail, so we fail when trying to re-attach). - * This should make us safe against a device being attached to a guest as a - * whole while there are still pasid users on it (aux and sva). 
- */ -int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) -{ - int ret = -ENODEV; - - if (domain->ops->aux_attach_dev) - ret = domain->ops->aux_attach_dev(domain, dev); - - if (!ret) - trace_attach_device_to_domain(dev); - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_aux_attach_device); - -void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) -{ - if (domain->ops->aux_detach_dev) { - domain->ops->aux_detach_dev(domain, dev); - trace_detach_device_from_domain(dev); - } -} -EXPORT_SYMBOL_GPL(iommu_aux_detach_device); - -int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) -{ - int ret = -ENODEV; - - if (domain->ops->aux_get_pasid) - ret = domain->ops->aux_get_pasid(domain, dev); - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); - /** * iommu_sva_bind_device() - Bind a process address space to a device * @dev: the device diff --git a/include/linux/iommu.h b/include/linux/iommu.h index cde1d0aad60f..9983a01373b2 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -144,7 +144,6 @@ struct iommu_resv_region { /** * enum iommu_dev_features - Per device IOMMU features - * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally * enabling %IOMMU_DEV_FEAT_SVA requires @@ -157,7 +156,6 @@ struct iommu_resv_region { * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature(). */ enum iommu_dev_features { - IOMMU_DEV_FEAT_AUX, IOMMU_DEV_FEAT_SVA, IOMMU_DEV_FEAT_IOPF, }; @@ -223,8 +221,6 @@ struct iommu_iotlb_gather { * @dev_has/enable/disable_feat: per device entries to check/enable/disable * iommu specific features. * @dev_feat_enabled: check enabled feature - * @aux_attach/detach_dev: aux-domain specific attach/detach entries. 
- * @aux_get_pasid: get the pasid given an aux-domain * @sva_bind: Bind process address space to device * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle @@ -285,11 +281,6 @@ struct iommu_ops { int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f); int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f); - /* Aux-domain specific attach/detach entries */ - int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev); - void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev); - int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev); - struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm, void *drvdata); void (*sva_unbind)(struct iommu_sva *handle); @@ -655,9 +646,6 @@ void iommu_release_device(struct device *dev); int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f); -int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev); -void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev); -int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev); struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, @@ -1002,23 +990,6 @@ iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) return -ENODEV; } -static inline int -iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) -{ - return -ENODEV; -} - -static inline void -iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) -{ -} - -static inline int -iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) -{ - return -ENODEV; -} - static inline struct iommu_sva * iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) { -- cgit v1.2.3-71-gd317 From 71fe30698dc3be6fd277c49c0c7b220c7ae92ffd Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:45 +0800 Subject: iommu: Remove apply_resv_region The apply_resv_region callback in iommu_ops was introduced to reserve an IOVA range in the given DMA domain when the IOMMU driver manages the IOVA by itself. As all drivers have been converted to use dma-iommu in the core, there is no driver using this anymore. Remove it to avoid dead code.
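For reference, a minimal sketch of what the dma-iommu core does instead, modelled on iova_reserve_iommu_regions() in drivers/iommu/dma-iommu.c (simplified and illustrative only, not part of this patch):

	/* Walk the device's reserved regions and punch them out of the
	 * allocatable IOVA space. */
	static void example_reserve_resv_regions(struct device *dev,
						 struct iova_domain *iovad)
	{
		struct iommu_resv_region *region;
		LIST_HEAD(resv_regions);

		iommu_get_resv_regions(dev, &resv_regions);
		list_for_each_entry(region, &resv_regions, list) {
			unsigned long lo = iova_pfn(iovad, region->start);
			unsigned long hi = iova_pfn(iovad, region->start +
						    region->length - 1);

			reserve_iova(iovad, lo, hi);
		}
		iommu_put_resv_regions(dev, &resv_regions);
	}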
Suggested-by: Robin Murphy Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-6-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 3 --- include/linux/iommu.h | 4 ---- 2 files changed, 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 0ebaf561a70e..7cf073c56036 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -790,9 +790,6 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group, dma_addr_t start, end, addr; size_t map_size = 0; - if (domain->ops->apply_resv_region) - domain->ops->apply_resv_region(dev, domain, entry); - start = ALIGN(entry->start, pg_size); end = ALIGN(entry->start + entry->length, pg_size); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9983a01373b2..9ffa2e88f3d0 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -214,7 +214,6 @@ struct iommu_iotlb_gather { * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device - * @apply_resv_region: Temporary helper call-back for iova reserved ranges * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) @@ -268,9 +267,6 @@ struct iommu_ops { /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); void (*put_resv_regions)(struct device *dev, struct list_head *list); - void (*apply_resv_region)(struct device *dev, - struct iommu_domain *domain, - struct iommu_resv_region *region); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); -- cgit v1.2.3-71-gd317 From 3f6634d997dba8140b3a7cba01776b9638d70dff Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:47 +0800 Subject: iommu: Use right way to retrieve iommu_ops The common iommu_ops is hooked to both device and domain. When a helper has both device and domain pointers, the way to get the iommu_ops looks messy in the iommu core. This sorts out how the iommu_ops are retrieved: the device-related helpers go through the device pointer, while the domain-related ones go through the domain pointer.
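As a minimal illustration of the convention (hypothetical example_* helpers, not from the patch), a device-path helper uses the dev_iommu_ops() accessor this change introduces, while a domain-path helper keeps dereferencing domain->ops:

	/* Device-path helper: the ops come from the device. */
	static bool example_dev_can_defer_attach(struct device *dev)
	{
		const struct iommu_ops *ops = dev_iommu_ops(dev);

		return ops->is_attach_deferred != NULL;
	}

	/* Domain-path helper: the ops come from the domain. */
	static bool example_domain_can_flush(struct iommu_domain *domain)
	{
		return domain->ops->flush_iotlb_all != NULL;
	}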
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-8-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 50 +++++++++++++++++++++++++------------------------- include/linux/iommu.h | 11 +++++++++++ 2 files changed, 36 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 7cf073c56036..7af0ee670deb 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -323,13 +323,14 @@ err_out: void iommu_release_device(struct device *dev) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops; if (!dev->iommu) return; iommu_device_unlink(dev->iommu->iommu_dev, dev); + ops = dev_iommu_ops(dev); ops->release_device(dev); iommu_group_remove_device(dev); @@ -833,8 +834,10 @@ out: static bool iommu_is_attach_deferred(struct iommu_domain *domain, struct device *dev) { - if (domain->ops->is_attach_deferred) - return domain->ops->is_attach_deferred(domain, dev); + const struct iommu_ops *ops = dev_iommu_ops(dev); + + if (ops->is_attach_deferred) + return ops->is_attach_deferred(domain, dev); return false; } @@ -1252,10 +1255,10 @@ int iommu_page_response(struct device *dev, struct iommu_fault_event *evt; struct iommu_fault_page_request *prm; struct dev_iommu *param = dev->iommu; + const struct iommu_ops *ops = dev_iommu_ops(dev); bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID; - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - if (!domain || !domain->ops->page_response) + if (!ops->page_response) return -ENODEV; if (!param || !param->fault_param) @@ -1296,7 +1299,7 @@ int iommu_page_response(struct device *dev, msg->pasid = 0; } - ret = domain->ops->page_response(dev, evt, msg); + ret = ops->page_response(dev, evt, msg); list_del(&evt->list); kfree(evt); break; @@ -1521,7 +1524,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group); static int iommu_get_def_domain_type(struct device *dev) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) return IOMMU_DOMAIN_DMA; @@ -1580,7 +1583,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group, */ static struct iommu_group *iommu_group_get_for_dev(struct device *dev) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_group *group; int ret; @@ -1588,9 +1591,6 @@ static struct iommu_group *iommu_group_get_for_dev(struct device *dev) if (group) return group; - if (!ops) - return ERR_PTR(-EINVAL); - group = ops->device_group(dev); if (WARN_ON_ONCE(group == NULL)) return ERR_PTR(-EINVAL); @@ -1759,10 +1759,10 @@ static int __iommu_group_dma_attach(struct iommu_group *group) static int iommu_group_do_probe_finalize(struct device *dev, void *data) { - struct iommu_domain *domain = data; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (domain->ops->probe_finalize) - domain->ops->probe_finalize(dev); + if (ops->probe_finalize) + ops->probe_finalize(dev); return 0; } @@ -2020,7 +2020,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) return __iommu_attach_device(domain, dev); @@ -2579,17 +2579,17 @@ 
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); void iommu_get_resv_regions(struct device *dev, struct list_head *list) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (ops && ops->get_resv_regions) + if (ops->get_resv_regions) ops->get_resv_regions(dev, list); } void iommu_put_resv_regions(struct device *dev, struct list_head *list) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (ops && ops->put_resv_regions) + if (ops->put_resv_regions) ops->put_resv_regions(dev, list); } @@ -2794,9 +2794,9 @@ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) { struct iommu_group *group; struct iommu_sva *handle = ERR_PTR(-EINVAL); - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (!ops || !ops->sva_bind) + if (!ops->sva_bind) return ERR_PTR(-ENODEV); group = iommu_group_get(dev); @@ -2837,9 +2837,9 @@ void iommu_sva_unbind_device(struct iommu_sva *handle) { struct iommu_group *group; struct device *dev = handle->dev; - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (!ops || !ops->sva_unbind) + if (!ops->sva_unbind) return; group = iommu_group_get(dev); @@ -2856,9 +2856,9 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); u32 iommu_sva_get_pasid(struct iommu_sva *handle) { - const struct iommu_ops *ops = handle->dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(handle->dev); - if (!ops || !ops->sva_get_pasid) + if (!ops->sva_get_pasid) return IOMMU_PASID_INVALID; return ops->sva_get_pasid(handle); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9ffa2e88f3d0..90f1b5e3809d 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -381,6 +381,17 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) }; } +static inline const struct iommu_ops *dev_iommu_ops(struct device *dev) +{ + /* + * Assume that valid ops must be installed if iommu_probe_device() + * has succeeded. The device ops are essentially for internal use + * within the IOMMU subsystem itself, so we should be able to trust + * ourselves not to misuse the helper. + */ + return dev->iommu->iommu_dev->ops; +} + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ -- cgit v1.2.3-71-gd317 From 41bb23e70b50b89b6137cbecd37009d782454860 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:48 +0800 Subject: iommu: Remove unused argument in is_attach_deferred The is_attach_deferred iommu_ops callback is a device op. The domain argument is unnecessary and never used. Remove it to make code clean. 
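Under the new signature, a driver callback reduces to a pure device-side lookup; a hypothetical sketch (example_dev_data and its defer_attach field are made-up names):

	static bool example_iommu_is_attach_deferred(struct device *dev)
	{
		/* Deferral is a per-device property; no domain is needed. */
		struct example_dev_data *data = dev_iommu_priv_get(dev);

		return data->defer_attach;
	}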
Suggested-by: Robin Murphy Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-9-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/amd_iommu.h | 3 +-- drivers/iommu/amd/iommu.c | 3 +-- drivers/iommu/amd/iommu_v2.c | 2 +- drivers/iommu/intel/iommu.c | 3 +-- drivers/iommu/iommu.c | 15 ++++++--------- include/linux/iommu.h | 2 +- 6 files changed, 11 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 416815a525d6..3b2f06b7aca6 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -116,8 +116,7 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain) extern bool translation_pre_enabled(struct amd_iommu *iommu); -extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev); +extern bool amd_iommu_is_attach_deferred(struct device *dev); extern int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 461f1844ed1f..37f2fbb4b129 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2215,8 +2215,7 @@ static void amd_iommu_get_resv_regions(struct device *dev, list_add_tail(®ion->list, head); } -bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +bool amd_iommu_is_attach_deferred(struct device *dev) { struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 58da08cc3d01..7c94ec05d289 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -537,7 +537,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) ret = NOTIFY_DONE; /* In kdump kernel pci dev is not initialized yet -> send INVALID */ - if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) { + if (amd_iommu_is_attach_deferred(&pdev->dev)) { amd_iommu_complete_ppr(pdev, iommu_fault->pasid, PPR_INVALID, tag); goto out; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 2b5f4e57a8bb..80f1294be634 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5052,8 +5052,7 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) } } -static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +static bool intel_iommu_is_attach_deferred(struct device *dev) { return attach_deferred(dev); } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 7af0ee670deb..27276421d253 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -831,13 +831,12 @@ out: return ret; } -static bool iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +static bool iommu_is_attach_deferred(struct device *dev) { const struct iommu_ops *ops = dev_iommu_ops(dev); if (ops->is_attach_deferred) - return ops->is_attach_deferred(domain, dev); + return ops->is_attach_deferred(dev); return false; } @@ -894,7 +893,7 @@ rename: mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); - if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) + if (group->domain && !iommu_is_attach_deferred(dev)) ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); if (ret) @@ -1745,7 +1744,7 @@ static int iommu_group_do_dma_attach(struct device *dev, void *data) struct 
iommu_domain *domain = data; int ret = 0; - if (!iommu_is_attach_deferred(domain, dev)) + if (!iommu_is_attach_deferred(dev)) ret = __iommu_attach_device(domain, dev); return ret; @@ -2020,9 +2019,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) { - const struct iommu_ops *ops = dev_iommu_ops(dev); - - if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) + if (iommu_is_attach_deferred(dev)) return __iommu_attach_device(domain, dev); return 0; @@ -2031,7 +2028,7 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - if (iommu_is_attach_deferred(domain, dev)) + if (iommu_is_attach_deferred(dev)) return; if (unlikely(domain->ops->detach_dev == NULL)) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 90f1b5e3809d..1ded6076a75c 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -269,7 +269,7 @@ struct iommu_ops { void (*put_resv_regions)(struct device *dev, struct list_head *list); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); - bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); + bool (*is_attach_deferred)(struct device *dev); /* Per device IOMMU features */ bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f); -- cgit v1.2.3-71-gd317 From 9a630a4b41a2639b65d024a5d2d88ed3ecca130a Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:49 +0800 Subject: iommu: Split struct iommu_ops Move the domain specific operations out of struct iommu_ops into a new structure that only has domain specific operations. This solves the problem of needing to know if the method vector for a given operation needs to be retrieved from the device or the domain. Logically the domain ops are the ones that make sense for external subsystems and endpoint drivers to use, while device ops, with the sole exception of domain_alloc, are IOMMU API internals. 
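The conversion pattern applied to each driver in the diff below looks roughly like this (hypothetical example_* names; a sketch, not a real driver):

	static const struct iommu_ops example_iommu_ops = {
		/* Device/global ops stay in iommu_ops... */
		.domain_alloc	= example_domain_alloc,
		.probe_device	= example_probe_device,
		.release_device	= example_release_device,
		.device_group	= generic_device_group,
		.pgsize_bitmap	= SZ_4K,
		/* ...while domain ops move into iommu_domain_ops. */
		.default_domain_ops = &(const struct iommu_domain_ops) {
			.attach_dev	= example_attach_dev,
			.map_pages	= example_map_pages,
			.unmap_pages	= example_unmap_pages,
			.iova_to_phys	= example_iova_to_phys,
			.free		= example_domain_free,
		}
	};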
Signed-off-by: Lu Baolu Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-10-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu.c | 20 ++++--- drivers/iommu/apple-dart.c | 20 ++++--- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 18 +++--- drivers/iommu/arm/arm-smmu/arm-smmu.c | 20 ++++--- drivers/iommu/arm/arm-smmu/qcom_iommu.c | 18 +++--- drivers/iommu/exynos-iommu.c | 14 +++-- drivers/iommu/fsl_pamu_domain.c | 10 ++-- drivers/iommu/intel/iommu.c | 20 ++++--- drivers/iommu/iommu.c | 19 +++--- drivers/iommu/ipmmu-vmsa.c | 18 +++--- drivers/iommu/msm_iommu.c | 30 +++++----- drivers/iommu/mtk_iommu.c | 20 ++++--- drivers/iommu/mtk_iommu_v1.c | 14 +++-- drivers/iommu/omap-iommu.c | 14 +++-- drivers/iommu/rockchip-iommu.c | 14 +++-- drivers/iommu/s390-iommu.c | 14 +++-- drivers/iommu/sprd-iommu.c | 18 +++--- drivers/iommu/sun50i-iommu.c | 18 +++--- drivers/iommu/tegra-gart.c | 18 +++--- drivers/iommu/tegra-smmu.c | 14 +++-- drivers/iommu/virtio-iommu.c | 14 +++-- include/linux/iommu.h | 91 +++++++++++++++++------------ 22 files changed, 256 insertions(+), 200 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 37f2fbb4b129..2bed113c4e6d 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2268,13 +2268,6 @@ static int amd_iommu_def_domain_type(struct device *dev) const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, - .domain_free = amd_iommu_domain_free, - .attach_dev = amd_iommu_attach_device, - .detach_dev = amd_iommu_detach_device, - .map = amd_iommu_map, - .iotlb_sync_map = amd_iommu_iotlb_sync_map, - .unmap = amd_iommu_unmap, - .iova_to_phys = amd_iommu_iova_to_phys, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, .probe_finalize = amd_iommu_probe_finalize, @@ -2283,9 +2276,18 @@ const struct iommu_ops amd_iommu_ops = { .put_resv_regions = generic_iommu_put_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, .pgsize_bitmap = AMD_IOMMU_PGSIZES, - .flush_iotlb_all = amd_iommu_flush_iotlb_all, - .iotlb_sync = amd_iommu_iotlb_sync, .def_domain_type = amd_iommu_def_domain_type, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = amd_iommu_attach_device, + .detach_dev = amd_iommu_detach_device, + .map = amd_iommu_map, + .unmap = amd_iommu_unmap, + .iotlb_sync_map = amd_iommu_iotlb_sync_map, + .iova_to_phys = amd_iommu_iova_to_phys, + .flush_iotlb_all = amd_iommu_flush_iotlb_all, + .iotlb_sync = amd_iommu_iotlb_sync, + .free = amd_iommu_domain_free, + } }; /***************************************************************************** diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 565ef5598811..decafb07ad08 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -765,15 +765,6 @@ static void apple_dart_get_resv_regions(struct device *dev, static const struct iommu_ops apple_dart_iommu_ops = { .domain_alloc = apple_dart_domain_alloc, - .domain_free = apple_dart_domain_free, - .attach_dev = apple_dart_attach_dev, - .detach_dev = apple_dart_detach_dev, - .map_pages = apple_dart_map_pages, - .unmap_pages = apple_dart_unmap_pages, - .flush_iotlb_all = apple_dart_flush_iotlb_all, - .iotlb_sync = apple_dart_iotlb_sync, - .iotlb_sync_map = apple_dart_iotlb_sync_map, - .iova_to_phys = apple_dart_iova_to_phys, .probe_device = apple_dart_probe_device, .release_device = 
apple_dart_release_device, .device_group = apple_dart_device_group, @@ -782,6 +773,17 @@ static const struct iommu_ops apple_dart_iommu_ops = { .get_resv_regions = apple_dart_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during dart probe */ + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = apple_dart_attach_dev, + .detach_dev = apple_dart_detach_dev, + .map_pages = apple_dart_map_pages, + .unmap_pages = apple_dart_unmap_pages, + .flush_iotlb_all = apple_dart_flush_iotlb_all, + .iotlb_sync = apple_dart_iotlb_sync, + .iotlb_sync_map = apple_dart_iotlb_sync_map, + .iova_to_phys = apple_dart_iova_to_phys, + .free = apple_dart_domain_free, + } }; static irqreturn_t apple_dart_irq(int irq, void *dev) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 6dc6d8b6b368..fd49282c03a3 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2841,17 +2841,9 @@ static int arm_smmu_dev_disable_feature(struct device *dev, static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, - .domain_free = arm_smmu_domain_free, - .attach_dev = arm_smmu_attach_dev, - .map_pages = arm_smmu_map_pages, - .unmap_pages = arm_smmu_unmap_pages, - .flush_iotlb_all = arm_smmu_flush_iotlb_all, - .iotlb_sync = arm_smmu_iotlb_sync, - .iova_to_phys = arm_smmu_iova_to_phys, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, - .enable_nesting = arm_smmu_enable_nesting, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, @@ -2865,6 +2857,16 @@ static struct iommu_ops arm_smmu_ops = { .page_response = arm_smmu_page_response, .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = arm_smmu_attach_dev, + .map_pages = arm_smmu_map_pages, + .unmap_pages = arm_smmu_unmap_pages, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, + .iotlb_sync = arm_smmu_iotlb_sync, + .iova_to_phys = arm_smmu_iova_to_phys, + .enable_nesting = arm_smmu_enable_nesting, + .free = arm_smmu_domain_free, + } }; /* Probing and initialisation functions */ diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 4bc75c4ce402..41321fb47152 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1583,25 +1583,27 @@ static int arm_smmu_def_domain_type(struct device *dev) static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, - .domain_free = arm_smmu_domain_free, - .attach_dev = arm_smmu_attach_dev, - .map_pages = arm_smmu_map_pages, - .unmap_pages = arm_smmu_unmap_pages, - .flush_iotlb_all = arm_smmu_flush_iotlb_all, - .iotlb_sync = arm_smmu_iotlb_sync, - .iova_to_phys = arm_smmu_iova_to_phys, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .probe_finalize = arm_smmu_probe_finalize, .device_group = arm_smmu_device_group, - .enable_nesting = arm_smmu_enable_nesting, - .set_pgtable_quirks = arm_smmu_set_pgtable_quirks, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .def_domain_type = arm_smmu_def_domain_type, .pgsize_bitmap = -1UL, /* 
Restricted during device attach */ .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = arm_smmu_attach_dev, + .map_pages = arm_smmu_map_pages, + .unmap_pages = arm_smmu_unmap_pages, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, + .iotlb_sync = arm_smmu_iotlb_sync, + .iova_to_phys = arm_smmu_iova_to_phys, + .enable_nesting = arm_smmu_enable_nesting, + .set_pgtable_quirks = arm_smmu_set_pgtable_quirks, + .free = arm_smmu_domain_free, + } }; static void arm_smmu_device_reset(struct arm_smmu_device *smmu) diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index b91874cb6cf3..ed659de20661 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -590,19 +590,21 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) static const struct iommu_ops qcom_iommu_ops = { .capable = qcom_iommu_capable, .domain_alloc = qcom_iommu_domain_alloc, - .domain_free = qcom_iommu_domain_free, - .attach_dev = qcom_iommu_attach_dev, - .detach_dev = qcom_iommu_detach_dev, - .map = qcom_iommu_map, - .unmap = qcom_iommu_unmap, - .flush_iotlb_all = qcom_iommu_flush_iotlb_all, - .iotlb_sync = qcom_iommu_iotlb_sync, - .iova_to_phys = qcom_iommu_iova_to_phys, .probe_device = qcom_iommu_probe_device, .release_device = qcom_iommu_release_device, .device_group = generic_device_group, .of_xlate = qcom_iommu_of_xlate, .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = qcom_iommu_attach_dev, + .detach_dev = qcom_iommu_detach_dev, + .map = qcom_iommu_map, + .unmap = qcom_iommu_unmap, + .flush_iotlb_all = qcom_iommu_flush_iotlb_all, + .iotlb_sync = qcom_iommu_iotlb_sync, + .iova_to_phys = qcom_iommu_iova_to_phys, + .free = qcom_iommu_domain_free, + } }; static int qcom_iommu_sec_ptbl_init(struct device *dev) diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 939ffa768986..71f2018e23fe 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1309,17 +1309,19 @@ static int exynos_iommu_of_xlate(struct device *dev, static const struct iommu_ops exynos_iommu_ops = { .domain_alloc = exynos_iommu_domain_alloc, - .domain_free = exynos_iommu_domain_free, - .attach_dev = exynos_iommu_attach_device, - .detach_dev = exynos_iommu_detach_device, - .map = exynos_iommu_map, - .unmap = exynos_iommu_unmap, - .iova_to_phys = exynos_iommu_iova_to_phys, .device_group = generic_device_group, .probe_device = exynos_iommu_probe_device, .release_device = exynos_iommu_release_device, .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, .of_xlate = exynos_iommu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = exynos_iommu_attach_device, + .detach_dev = exynos_iommu_detach_device, + .map = exynos_iommu_map, + .unmap = exynos_iommu_unmap, + .iova_to_phys = exynos_iommu_iova_to_phys, + .free = exynos_iommu_domain_free, + } }; static int __init exynos_iommu_init(void) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index a47f47307109..69a4a62dc3b9 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -453,13 +453,15 @@ static void fsl_pamu_release_device(struct device *dev) static const struct iommu_ops fsl_pamu_ops = { .capable = fsl_pamu_capable, .domain_alloc = fsl_pamu_domain_alloc, - .domain_free = fsl_pamu_domain_free, - .attach_dev = fsl_pamu_attach_device, - .detach_dev 
= fsl_pamu_detach_device, - .iova_to_phys = fsl_pamu_iova_to_phys, .probe_device = fsl_pamu_probe_device, .release_device = fsl_pamu_release_device, .device_group = fsl_pamu_device_group, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = fsl_pamu_attach_device, + .detach_dev = fsl_pamu_detach_device, + .iova_to_phys = fsl_pamu_iova_to_phys, + .free = fsl_pamu_domain_free, + } }; int __init pamu_domain_init(void) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 80f1294be634..b549172e88ef 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5092,15 +5092,6 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain, const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, - .domain_free = intel_iommu_domain_free, - .attach_dev = intel_iommu_attach_device, - .detach_dev = intel_iommu_detach_device, - .map_pages = intel_iommu_map_pages, - .unmap_pages = intel_iommu_unmap_pages, - .iotlb_sync_map = intel_iommu_iotlb_sync_map, - .flush_iotlb_all = intel_flush_iotlb_all, - .iotlb_sync = intel_iommu_tlb_sync, - .iova_to_phys = intel_iommu_iova_to_phys, .probe_device = intel_iommu_probe_device, .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, @@ -5118,6 +5109,17 @@ const struct iommu_ops intel_iommu_ops = { .sva_get_pasid = intel_svm_get_pasid, .page_response = intel_svm_page_response, #endif + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = intel_iommu_attach_device, + .detach_dev = intel_iommu_detach_device, + .map_pages = intel_iommu_map_pages, + .unmap_pages = intel_iommu_unmap_pages, + .iotlb_sync_map = intel_iommu_iotlb_sync_map, + .flush_iotlb_all = intel_flush_iotlb_all, + .iotlb_sync = intel_iommu_tlb_sync, + .iova_to_phys = intel_iommu_iova_to_phys, + .free = intel_iommu_domain_free, + } }; static void quirk_iommu_igfx(struct pci_dev *dev) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 27276421d253..f2c45b85b9fc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1950,10 +1950,11 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, if (!domain) return NULL; - domain->ops = bus->iommu_ops; domain->type = type; /* Assume all sizes by default; the driver may override this later */ - domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; + domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; + if (!domain->ops) + domain->ops = bus->iommu_ops->default_domain_ops; if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { iommu_domain_free(domain); @@ -1971,7 +1972,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc); void iommu_domain_free(struct iommu_domain *domain) { iommu_put_dma_cookie(domain); - domain->ops->domain_free(domain); + domain->ops->free(domain); } EXPORT_SYMBOL_GPL(iommu_domain_free); @@ -2242,7 +2243,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp, size_t *mapped) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t pgsize, count; int ret; @@ -2265,7 +2266,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, static int __iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; unsigned long 
orig_iova = iova; unsigned int min_pagesz; size_t orig_size = size; @@ -2325,7 +2326,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova, static int _iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; int ret; ret = __iommu_map(domain, iova, paddr, size, prot, gfp); @@ -2354,7 +2355,7 @@ static size_t __iommu_unmap_pages(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t pgsize, count; pgsize = iommu_pgsize(domain, iova, iova, size, &count); @@ -2367,7 +2368,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t unmapped_page, unmapped = 0; unsigned long orig_iova = iova; unsigned int min_pagesz; @@ -2443,7 +2444,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, gfp_t gfp) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t len = 0, mapped = 0; phys_addr_t start; unsigned int i = 0; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ca752bdc710f..00d5f7a33499 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -868,14 +868,6 @@ static struct iommu_group *ipmmu_find_group(struct device *dev) static const struct iommu_ops ipmmu_ops = { .domain_alloc = ipmmu_domain_alloc, - .domain_free = ipmmu_domain_free, - .attach_dev = ipmmu_attach_device, - .detach_dev = ipmmu_detach_device, - .map = ipmmu_map, - .unmap = ipmmu_unmap, - .flush_iotlb_all = ipmmu_flush_iotlb_all, - .iotlb_sync = ipmmu_iotlb_sync, - .iova_to_phys = ipmmu_iova_to_phys, .probe_device = ipmmu_probe_device, .release_device = ipmmu_release_device, .probe_finalize = ipmmu_probe_finalize, @@ -883,6 +875,16 @@ static const struct iommu_ops ipmmu_ops = { ? generic_device_group : ipmmu_find_group, .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, .of_xlate = ipmmu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = ipmmu_attach_device, + .detach_dev = ipmmu_detach_device, + .map = ipmmu_map, + .unmap = ipmmu_unmap, + .flush_iotlb_all = ipmmu_flush_iotlb_all, + .iotlb_sync = ipmmu_iotlb_sync, + .iova_to_phys = ipmmu_iova_to_phys, + .free = ipmmu_domain_free, + } }; /* ----------------------------------------------------------------------------- diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 6a2e511edb85..4f441f1b750d 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -668,25 +668,27 @@ fail: static struct iommu_ops msm_iommu_ops = { .domain_alloc = msm_iommu_domain_alloc, - .domain_free = msm_iommu_domain_free, - .attach_dev = msm_iommu_attach_dev, - .detach_dev = msm_iommu_detach_dev, - .map = msm_iommu_map, - .unmap = msm_iommu_unmap, - /* - * Nothing is needed here, the barrier to guarantee - * completion of the tlb sync operation is implicitly - * taken care when the iommu client does a writel before - * kick starting the other master. 
- */ - .iotlb_sync = NULL, - .iotlb_sync_map = msm_iommu_sync_map, - .iova_to_phys = msm_iommu_iova_to_phys, .probe_device = msm_iommu_probe_device, .release_device = msm_iommu_release_device, .device_group = generic_device_group, .pgsize_bitmap = MSM_IOMMU_PGSIZES, .of_xlate = qcom_iommu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = msm_iommu_attach_dev, + .detach_dev = msm_iommu_detach_dev, + .map = msm_iommu_map, + .unmap = msm_iommu_unmap, + /* + * Nothing is needed here, the barrier to guarantee + * completion of the tlb sync operation is implicitly + * taken care when the iommu client does a writel before + * kick starting the other master. + */ + .iotlb_sync = NULL, + .iotlb_sync_map = msm_iommu_sync_map, + .iova_to_phys = msm_iommu_iova_to_phys, + .free = msm_iommu_domain_free, + } }; static int msm_iommu_probe(struct platform_device *pdev) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 25b834104790..884c288a8f63 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -658,15 +658,6 @@ static void mtk_iommu_get_resv_regions(struct device *dev, static const struct iommu_ops mtk_iommu_ops = { .domain_alloc = mtk_iommu_domain_alloc, - .domain_free = mtk_iommu_domain_free, - .attach_dev = mtk_iommu_attach_device, - .detach_dev = mtk_iommu_detach_device, - .map = mtk_iommu_map, - .unmap = mtk_iommu_unmap, - .flush_iotlb_all = mtk_iommu_flush_iotlb_all, - .iotlb_sync = mtk_iommu_iotlb_sync, - .iotlb_sync_map = mtk_iommu_sync_map, - .iova_to_phys = mtk_iommu_iova_to_phys, .probe_device = mtk_iommu_probe_device, .release_device = mtk_iommu_release_device, .device_group = mtk_iommu_device_group, @@ -675,6 +666,17 @@ static const struct iommu_ops mtk_iommu_ops = { .put_resv_regions = generic_iommu_put_resv_regions, .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = mtk_iommu_attach_device, + .detach_dev = mtk_iommu_detach_device, + .map = mtk_iommu_map, + .unmap = mtk_iommu_unmap, + .flush_iotlb_all = mtk_iommu_flush_iotlb_all, + .iotlb_sync = mtk_iommu_iotlb_sync, + .iotlb_sync_map = mtk_iommu_sync_map, + .iova_to_phys = mtk_iommu_iova_to_phys, + .free = mtk_iommu_domain_free, + } }; static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index be22fcf988ce..293ef7e1a861 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -514,12 +514,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) static const struct iommu_ops mtk_iommu_ops = { .domain_alloc = mtk_iommu_domain_alloc, - .domain_free = mtk_iommu_domain_free, - .attach_dev = mtk_iommu_attach_device, - .detach_dev = mtk_iommu_detach_device, - .map = mtk_iommu_map, - .unmap = mtk_iommu_unmap, - .iova_to_phys = mtk_iommu_iova_to_phys, .probe_device = mtk_iommu_probe_device, .probe_finalize = mtk_iommu_probe_finalize, .release_device = mtk_iommu_release_device, @@ -527,6 +521,14 @@ static const struct iommu_ops mtk_iommu_ops = { .device_group = generic_device_group, .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT, .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = mtk_iommu_attach_device, + .detach_dev = mtk_iommu_detach_device, + .map = mtk_iommu_map, + .unmap = mtk_iommu_unmap, + .iova_to_phys = mtk_iommu_iova_to_phys, + .free = mtk_iommu_domain_free, + } }; static const struct of_device_id 
mtk_iommu_of_ids[] = { diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 980e4af3f06b..4aab631ef517 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1734,16 +1734,18 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev) static const struct iommu_ops omap_iommu_ops = { .domain_alloc = omap_iommu_domain_alloc, - .domain_free = omap_iommu_domain_free, - .attach_dev = omap_iommu_attach_dev, - .detach_dev = omap_iommu_detach_dev, - .map = omap_iommu_map, - .unmap = omap_iommu_unmap, - .iova_to_phys = omap_iommu_iova_to_phys, .probe_device = omap_iommu_probe_device, .release_device = omap_iommu_release_device, .device_group = omap_iommu_device_group, .pgsize_bitmap = OMAP_IOMMU_PGSIZES, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = omap_iommu_attach_dev, + .detach_dev = omap_iommu_detach_dev, + .map = omap_iommu_map, + .unmap = omap_iommu_unmap, + .iova_to_phys = omap_iommu_iova_to_phys, + .free = omap_iommu_domain_free, + } }; static int __init omap_iommu_init(void) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 7f23ad61c094..2ae97a84d41b 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1187,17 +1187,19 @@ static int rk_iommu_of_xlate(struct device *dev, static const struct iommu_ops rk_iommu_ops = { .domain_alloc = rk_iommu_domain_alloc, - .domain_free = rk_iommu_domain_free, - .attach_dev = rk_iommu_attach_device, - .detach_dev = rk_iommu_detach_device, - .map = rk_iommu_map, - .unmap = rk_iommu_unmap, .probe_device = rk_iommu_probe_device, .release_device = rk_iommu_release_device, - .iova_to_phys = rk_iommu_iova_to_phys, .device_group = rk_iommu_device_group, .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, .of_xlate = rk_iommu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = rk_iommu_attach_device, + .detach_dev = rk_iommu_detach_device, + .map = rk_iommu_map, + .unmap = rk_iommu_unmap, + .iova_to_phys = rk_iommu_iova_to_phys, + .free = rk_iommu_domain_free, + } }; static int rk_iommu_probe(struct platform_device *pdev) diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 50860ebdd087..3833e86c6e7b 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -363,16 +363,18 @@ void zpci_destroy_iommu(struct zpci_dev *zdev) static const struct iommu_ops s390_iommu_ops = { .capable = s390_iommu_capable, .domain_alloc = s390_domain_alloc, - .domain_free = s390_domain_free, - .attach_dev = s390_iommu_attach_device, - .detach_dev = s390_iommu_detach_device, - .map = s390_iommu_map, - .unmap = s390_iommu_unmap, - .iova_to_phys = s390_iommu_iova_to_phys, .probe_device = s390_iommu_probe_device, .release_device = s390_iommu_release_device, .device_group = generic_device_group, .pgsize_bitmap = S390_IOMMU_PGSIZES, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = s390_iommu_attach_device, + .detach_dev = s390_iommu_detach_device, + .map = s390_iommu_map, + .unmap = s390_iommu_unmap, + .iova_to_phys = s390_iommu_iova_to_phys, + .free = s390_domain_free, + } }; static int __init s390_iommu_init(void) diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c index 27ac818b0354..bd409bab6286 100644 --- a/drivers/iommu/sprd-iommu.c +++ b/drivers/iommu/sprd-iommu.c @@ -416,20 +416,22 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) static const struct iommu_ops sprd_iommu_ops = { .domain_alloc = 
sprd_iommu_domain_alloc, - .domain_free = sprd_iommu_domain_free, - .attach_dev = sprd_iommu_attach_device, - .detach_dev = sprd_iommu_detach_device, - .map = sprd_iommu_map, - .unmap = sprd_iommu_unmap, - .iotlb_sync_map = sprd_iommu_sync_map, - .iotlb_sync = sprd_iommu_sync, - .iova_to_phys = sprd_iommu_iova_to_phys, .probe_device = sprd_iommu_probe_device, .release_device = sprd_iommu_release_device, .device_group = sprd_iommu_device_group, .of_xlate = sprd_iommu_of_xlate, .pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT, .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = sprd_iommu_attach_device, + .detach_dev = sprd_iommu_detach_device, + .map = sprd_iommu_map, + .unmap = sprd_iommu_unmap, + .iotlb_sync_map = sprd_iommu_sync_map, + .iotlb_sync = sprd_iommu_sync, + .iova_to_phys = sprd_iommu_iova_to_phys, + .free = sprd_iommu_domain_free, + } }; static const struct of_device_id sprd_iommu_of_match[] = { diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index 92997021e188..c54ab477b8fd 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -760,19 +760,21 @@ static int sun50i_iommu_of_xlate(struct device *dev, static const struct iommu_ops sun50i_iommu_ops = { .pgsize_bitmap = SZ_4K, - .attach_dev = sun50i_iommu_attach_device, - .detach_dev = sun50i_iommu_detach_device, .device_group = sun50i_iommu_device_group, .domain_alloc = sun50i_iommu_domain_alloc, - .domain_free = sun50i_iommu_domain_free, - .flush_iotlb_all = sun50i_iommu_flush_iotlb_all, - .iotlb_sync = sun50i_iommu_iotlb_sync, - .iova_to_phys = sun50i_iommu_iova_to_phys, - .map = sun50i_iommu_map, .of_xlate = sun50i_iommu_of_xlate, .probe_device = sun50i_iommu_probe_device, .release_device = sun50i_iommu_release_device, - .unmap = sun50i_iommu_unmap, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = sun50i_iommu_attach_device, + .detach_dev = sun50i_iommu_detach_device, + .flush_iotlb_all = sun50i_iommu_flush_iotlb_all, + .iotlb_sync = sun50i_iommu_iotlb_sync, + .iova_to_phys = sun50i_iommu_iova_to_phys, + .map = sun50i_iommu_map, + .unmap = sun50i_iommu_unmap, + .free = sun50i_iommu_domain_free, + } }; static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu, diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index bbd287d19324..a6700a40a6f8 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -272,19 +272,21 @@ static void gart_iommu_sync(struct iommu_domain *domain, static const struct iommu_ops gart_iommu_ops = { .domain_alloc = gart_iommu_domain_alloc, - .domain_free = gart_iommu_domain_free, - .attach_dev = gart_iommu_attach_dev, - .detach_dev = gart_iommu_detach_dev, .probe_device = gart_iommu_probe_device, .release_device = gart_iommu_release_device, .device_group = generic_device_group, - .map = gart_iommu_map, - .unmap = gart_iommu_unmap, - .iova_to_phys = gart_iommu_iova_to_phys, .pgsize_bitmap = GART_IOMMU_PGSIZES, .of_xlate = gart_iommu_of_xlate, - .iotlb_sync_map = gart_iommu_sync_map, - .iotlb_sync = gart_iommu_sync, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = gart_iommu_attach_dev, + .detach_dev = gart_iommu_detach_dev, + .map = gart_iommu_map, + .unmap = gart_iommu_unmap, + .iova_to_phys = gart_iommu_iova_to_phys, + .iotlb_sync_map = gart_iommu_sync_map, + .iotlb_sync = gart_iommu_sync, + .free = gart_iommu_domain_free, + } }; int tegra_gart_suspend(struct gart_device *gart) diff --git a/drivers/iommu/tegra-smmu.c 
b/drivers/iommu/tegra-smmu.c index 43df44f918a1..44c3716e16ee 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -963,17 +963,19 @@ static int tegra_smmu_of_xlate(struct device *dev, static const struct iommu_ops tegra_smmu_ops = { .domain_alloc = tegra_smmu_domain_alloc, - .domain_free = tegra_smmu_domain_free, - .attach_dev = tegra_smmu_attach_dev, - .detach_dev = tegra_smmu_detach_dev, .probe_device = tegra_smmu_probe_device, .release_device = tegra_smmu_release_device, .device_group = tegra_smmu_device_group, - .map = tegra_smmu_map, - .unmap = tegra_smmu_unmap, - .iova_to_phys = tegra_smmu_iova_to_phys, .of_xlate = tegra_smmu_of_xlate, .pgsize_bitmap = SZ_4K, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = tegra_smmu_attach_dev, + .detach_dev = tegra_smmu_detach_dev, + .map = tegra_smmu_map, + .unmap = tegra_smmu_unmap, + .iova_to_phys = tegra_smmu_iova_to_phys, + .free = tegra_smmu_domain_free, + } }; static void tegra_smmu_ahb_enable(void) diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index f2aa34f57454..25be4b822aa0 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -1008,12 +1008,6 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args) static struct iommu_ops viommu_ops = { .domain_alloc = viommu_domain_alloc, - .domain_free = viommu_domain_free, - .attach_dev = viommu_attach_dev, - .map = viommu_map, - .unmap = viommu_unmap, - .iova_to_phys = viommu_iova_to_phys, - .iotlb_sync = viommu_iotlb_sync, .probe_device = viommu_probe_device, .probe_finalize = viommu_probe_finalize, .release_device = viommu_release_device, @@ -1022,6 +1016,14 @@ static struct iommu_ops viommu_ops = { .put_resv_regions = generic_iommu_put_resv_regions, .of_xlate = viommu_of_xlate, .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = viommu_attach_dev, + .map = viommu_map, + .unmap = viommu_unmap, + .iova_to_phys = viommu_iova_to_phys, + .iotlb_sync = viommu_iotlb_sync, + .free = viommu_domain_free, + } }; static int viommu_init_vqs(struct viommu_dev *viommu) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 1ded6076a75c..9208eca4b0d1 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -37,6 +37,7 @@ struct iommu_group; struct bus_type; struct device; struct iommu_domain; +struct iommu_domain_ops; struct notifier_block; struct iommu_sva; struct iommu_fault_event; @@ -88,7 +89,7 @@ struct iommu_domain_geometry { struct iommu_domain { unsigned type; - const struct iommu_ops *ops; + const struct iommu_domain_ops *ops; unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ iommu_fault_handler_t handler; void *handler_token; @@ -192,26 +193,11 @@ struct iommu_iotlb_gather { * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @domain_alloc: allocate iommu domain - * @domain_free: free iommu domain - * @attach_dev: attach device to an iommu domain - * @detach_dev: detach device from an iommu domain - * @map: map a physically contiguous memory region to an iommu domain - * @map_pages: map a physically contiguous set of pages of the same size to - * an iommu domain. 
- * @unmap: unmap a physically contiguous memory region from an iommu domain - * @unmap_pages: unmap a number of pages of the same size from an iommu domain - * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain - * @iotlb_sync_map: Sync mappings created recently using @map to the hardware - * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush - * queue - * @iova_to_phys: translate iova to physical address * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU * group and attached to the groups domain * @device_group: find iommu group for a particular device - * @enable_nesting: Enable nesting - * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @of_xlate: add OF master IDs to iommu grouping @@ -228,6 +214,7 @@ struct iommu_iotlb_gather { * - IOMMU_DOMAIN_IDENTITY: must use an identity domain * - IOMMU_DOMAIN_DMA: must use a dma domain * - 0: use the default setting + * @default_domain_ops: the default ops for domains * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops */ @@ -236,33 +223,11 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); - void (*domain_free)(struct iommu_domain *); - int (*attach_dev)(struct iommu_domain *domain, struct device *dev); - void (*detach_dev)(struct iommu_domain *domain, struct device *dev); - int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp); - int (*map_pages)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t pgsize, size_t pgcount, - int prot, gfp_t gfp, size_t *mapped); - size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *iotlb_gather); - size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, - size_t pgsize, size_t pgcount, - struct iommu_iotlb_gather *iotlb_gather); - void (*flush_iotlb_all)(struct iommu_domain *domain); - void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, - size_t size); - void (*iotlb_sync)(struct iommu_domain *domain, - struct iommu_iotlb_gather *iotlb_gather); - phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); void (*probe_finalize)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); - int (*enable_nesting)(struct iommu_domain *domain); - int (*set_pgtable_quirks)(struct iommu_domain *domain, - unsigned long quirks); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); @@ -288,10 +253,60 @@ struct iommu_ops { int (*def_domain_type)(struct device *dev); + const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; }; +/** + * struct iommu_domain_ops - domain specific operations + * @attach_dev: attach an iommu domain to a device + * @detach_dev: detach an iommu domain from a device + * @map: map a physically contiguous memory region to an iommu domain + * @map_pages: map a physically contiguous set of pages of the 
same size to + * an iommu domain. + * @unmap: unmap a physically contiguous memory region from an iommu domain + * @unmap_pages: unmap a number of pages of the same size from an iommu domain + * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain + * @iotlb_sync_map: Sync mappings created recently using @map to the hardware + * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush + * queue + * @iova_to_phys: translate iova to physical address + * @enable_nesting: Enable nesting + * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) + * @free: Release the domain after use. + */ +struct iommu_domain_ops { + int (*attach_dev)(struct iommu_domain *domain, struct device *dev); + void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + + int (*map)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + int (*map_pages)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped); + size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, + size_t size, struct iommu_iotlb_gather *iotlb_gather); + size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *iotlb_gather); + + void (*flush_iotlb_all)(struct iommu_domain *domain); + void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, + size_t size); + void (*iotlb_sync)(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather); + + phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, + dma_addr_t iova); + + int (*enable_nesting)(struct iommu_domain *domain); + int (*set_pgtable_quirks)(struct iommu_domain *domain, + unsigned long quirks); + + void (*free)(struct iommu_domain *domain); +}; + /** * struct iommu_device - IOMMU core representation of one IOMMU hardware * instance -- cgit v1.2.3-71-gd317 From 6bb477df04366e0f69dd2f49e1ae1099069326bc Mon Sep 17 00:00:00 2001 From: Yun Zhou Date: Thu, 17 Feb 2022 22:12:34 +0800 Subject: spi: use specific last_cs instead of last_cs_enable Commit d40f0b6f2e21 introduced last_cs_enable to avoid setting chipselect if it's not necessary, but it also introduces a bug. The chipselect may not be set correctly on multi-device SPI busses. The reason is that we can't judge the chipselect by bool last_cs_enable, since chipselect may be modified after other devices were accessed. So we should record the specific chipselect state to avoid confusion. Signed-off-by: Yun Zhou Link: https://lore.kernel.org/r/20220217141234.72737-1-yun.zhou@windriver.com Signed-off-by: Mark Brown --- drivers/spi/spi.c | 8 ++++++-- include/linux/spi/spi.h | 5 +++-- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index cd4dc3131e17..6326e592fcfd 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -926,13 +926,14 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) * Avoid calling into the driver (or doing delays) if the chip select * isn't actually changing from the last time this was called.
*/ - if (!force && (spi->controller->last_cs_enable == enable) && + if (!force && ((enable && spi->controller->last_cs == spi->chip_select) || + (!enable && spi->controller->last_cs != spi->chip_select)) && (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) return; trace_spi_set_cs(spi, activate); - spi->controller->last_cs_enable = enable; + spi->controller->last_cs = enable ? spi->chip_select : -1; spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) { @@ -3016,6 +3017,9 @@ int spi_register_controller(struct spi_controller *ctlr) goto free_bus_id; } + /* setting last_cs to -1 means no chip selected */ + ctlr->last_cs = -1; + status = device_add(&ctlr->dev); if (status < 0) goto free_bus_id; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 579d71cdf6fa..7d005fa4631c 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -370,7 +370,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @cur_msg_prepared: spi_prepare_message was called for the currently * in-flight message * @cur_msg_mapped: message has been mapped for DMA - * @last_cs_enable: was enable true on the last call to set_cs. + * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip + * selected * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. * @xfer_completion: used by core transfer_one_message() * @busy: message pump is busy @@ -603,7 +604,7 @@ struct spi_controller { bool auto_runtime_pm; bool cur_msg_prepared; bool cur_msg_mapped; - bool last_cs_enable; + char last_cs; bool last_cs_mode_high; bool fallback; struct completion xfer_completion; -- cgit v1.2.3-71-gd317 From 20f01f163203666010ee1560852590a0c0572726 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 24 Jan 2022 13:59:38 -0800 Subject: blk-crypto: show crypto capabilities in sysfs Add sysfs files that expose the inline encryption capabilities of request queues: /sys/block/$disk/queue/crypto/max_dun_bits /sys/block/$disk/queue/crypto/modes/$mode /sys/block/$disk/queue/crypto/num_keyslots Userspace can use these new files to decide what encryption settings to use, or whether to use inline encryption at all. This also brings the crypto capabilities in line with the other queue properties, which are already discoverable via the queue directory in sysfs. Design notes: - Place the new files in a new subdirectory "crypto" to group them together and to avoid complicating the main "queue" directory. This also makes it possible to replace "crypto" with a symlink later if we ever make the blk_crypto_profiles into real kobjects (see below). - It was necessary to define a new kobject that corresponds to the crypto subdirectory. For now, this kobject just contains a pointer to the blk_crypto_profile. Note that multiple queues (and hence multiple such kobjects) may refer to the same blk_crypto_profile. An alternative design would more closely match the current kernel data structures: the blk_crypto_profile could be a kobject itself, located directly under the host controller device's kobject, while /sys/block/$disk/queue/crypto would be a symlink to it. I decided not to do that for now because it would require a lot more changes, such as no longer embedding blk_crypto_profile in other structures, and also because I'm not sure we can rule out moving the crypto capabilities into 'struct queue_limits' in the future. 
(Even if multiple queues share the same crypto engine, maybe the supported data unit sizes could differ due to other queue properties.) It would also still be possible to switch to that design later without breaking userspace, by replacing the directory with a symlink. - Use "max_dun_bits" instead of "max_dun_bytes". Currently, the kernel internally stores this value in bytes, but that's an implementation detail. It probably makes more sense to talk about this value in bits, and choosing bits is more future-proof. - "modes" is a sub-subdirectory, since there may be multiple supported crypto modes, sysfs is supposed to have one value per file, and it makes sense to group all the mode files together. - Each mode had to be named. The crypto API names like "xts(aes)" are not appropriate because they don't specify the key size. Therefore, I assigned new names. The exact names chosen are arbitrary, but they happen to match the names used in log messages in fs/crypto/. - The "num_keyslots" file is a bit different from the others in that it is only useful to know for performance reasons. However, it's included as it can still be useful. For example, a user might not want to use inline encryption if there aren't very many keyslots. Reviewed-by: Hannes Reinecke Signed-off-by: Eric Biggers Link: https://lore.kernel.org/r/20220124215938.2769-4-ebiggers@kernel.org Signed-off-by: Jens Axboe --- Documentation/ABI/stable/sysfs-block | 49 ++++++++++ block/Makefile | 3 +- block/blk-crypto-internal.h | 12 +++ block/blk-crypto-sysfs.c | 172 +++++++++++++++++++++++++++++++++++ block/blk-crypto.c | 3 + block/blk-sysfs.c | 6 ++ include/linux/blkdev.h | 1 + 7 files changed, 245 insertions(+), 1 deletion(-) create mode 100644 block/blk-crypto-sysfs.c (limited to 'include/linux') diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block index 8dd3e84a8aad..e8797cd09aff 100644 --- a/Documentation/ABI/stable/sysfs-block +++ b/Documentation/ABI/stable/sysfs-block @@ -155,6 +155,55 @@ Description: last zone of the device which may be smaller. +What: /sys/block//queue/crypto/ +Date: February 2022 +Contact: linux-block@vger.kernel.org +Description: + The presence of this subdirectory of /sys/block//queue/ + indicates that the device supports inline encryption. This + subdirectory contains files which describe the inline encryption + capabilities of the device. For more information about inline + encryption, refer to Documentation/block/inline-encryption.rst. + + +What: /sys/block//queue/crypto/max_dun_bits +Date: February 2022 +Contact: linux-block@vger.kernel.org +Description: + [RO] This file shows the maximum length, in bits, of data unit + numbers accepted by the device in inline encryption requests. + + +What: /sys/block//queue/crypto/modes/ +Date: February 2022 +Contact: linux-block@vger.kernel.org +Description: + [RO] For each crypto mode (i.e., encryption/decryption + algorithm) the device supports with inline encryption, a file + will exist at this location. It will contain a hexadecimal + number that is a bitmask of the supported data unit sizes, in + bytes, for that crypto mode. + + Currently, the crypto modes that may be supported are: + + * AES-256-XTS + * AES-128-CBC-ESSIV + * Adiantum + + For example, if a device supports AES-256-XTS inline encryption + with data unit sizes of 512 and 4096 bytes, the file + /sys/block//queue/crypto/modes/AES-256-XTS will exist and + will contain "0x1200". 
+ + +What: /sys/block//queue/crypto/num_keyslots +Date: February 2022 +Contact: linux-block@vger.kernel.org +Description: + [RO] This file shows the number of keyslots the device has for + use with inline encryption. + + What: /sys/block//queue/dax Date: June 2016 Contact: linux-block@vger.kernel.org diff --git a/block/Makefile b/block/Makefile index f38eaa612929..3950ecbc5c26 100644 --- a/block/Makefile +++ b/block/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o obj-$(CONFIG_BLK_PM) += blk-pm.o -obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += blk-crypto.o blk-crypto-profile.o +obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += blk-crypto.o blk-crypto-profile.o \ + blk-crypto-sysfs.o obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o obj-$(CONFIG_BLOCK_HOLDER_DEPRECATED) += holder.o diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h index 2fb0d65a464c..e6818ffaddbf 100644 --- a/block/blk-crypto-internal.h +++ b/block/blk-crypto-internal.h @@ -11,6 +11,7 @@ /* Represents a crypto mode supported by blk-crypto */ struct blk_crypto_mode { + const char *name; /* name of this mode, shown in sysfs */ const char *cipher_str; /* crypto API name (for fallback case) */ unsigned int keysize; /* key size in bytes */ unsigned int ivsize; /* iv size in bytes */ @@ -20,6 +21,10 @@ extern const struct blk_crypto_mode blk_crypto_modes[]; #ifdef CONFIG_BLK_INLINE_ENCRYPTION +int blk_crypto_sysfs_register(struct request_queue *q); + +void blk_crypto_sysfs_unregister(struct request_queue *q); + void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], unsigned int inc); @@ -62,6 +67,13 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq) #else /* CONFIG_BLK_INLINE_ENCRYPTION */ +static inline int blk_crypto_sysfs_register(struct request_queue *q) +{ + return 0; +} + +static inline void blk_crypto_sysfs_unregister(struct request_queue *q) { } + static inline bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio) { diff --git a/block/blk-crypto-sysfs.c b/block/blk-crypto-sysfs.c new file mode 100644 index 000000000000..fd93bd2f33b7 --- /dev/null +++ b/block/blk-crypto-sysfs.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2021 Google LLC + * + * sysfs support for blk-crypto. This file contains the code which exports the + * crypto capabilities of devices via /sys/block/$disk/queue/crypto/. 
+ */ + +#include + +#include "blk-crypto-internal.h" + +struct blk_crypto_kobj { + struct kobject kobj; + struct blk_crypto_profile *profile; +}; + +struct blk_crypto_attr { + struct attribute attr; + ssize_t (*show)(struct blk_crypto_profile *profile, + struct blk_crypto_attr *attr, char *page); +}; + +static struct blk_crypto_profile *kobj_to_crypto_profile(struct kobject *kobj) +{ + return container_of(kobj, struct blk_crypto_kobj, kobj)->profile; +} + +static struct blk_crypto_attr *attr_to_crypto_attr(struct attribute *attr) +{ + return container_of(attr, struct blk_crypto_attr, attr); +} + +static ssize_t max_dun_bits_show(struct blk_crypto_profile *profile, + struct blk_crypto_attr *attr, char *page) +{ + return sysfs_emit(page, "%u\n", 8 * profile->max_dun_bytes_supported); +} + +static ssize_t num_keyslots_show(struct blk_crypto_profile *profile, + struct blk_crypto_attr *attr, char *page) +{ + return sysfs_emit(page, "%u\n", profile->num_slots); +} + +#define BLK_CRYPTO_RO_ATTR(_name) \ + static struct blk_crypto_attr _name##_attr = __ATTR_RO(_name) + +BLK_CRYPTO_RO_ATTR(max_dun_bits); +BLK_CRYPTO_RO_ATTR(num_keyslots); + +static struct attribute *blk_crypto_attrs[] = { + &max_dun_bits_attr.attr, + &num_keyslots_attr.attr, + NULL, +}; + +static const struct attribute_group blk_crypto_attr_group = { + .attrs = blk_crypto_attrs, +}; + +/* + * The encryption mode attributes. To avoid hard-coding the list of encryption + * modes, these are initialized at boot time by blk_crypto_sysfs_init(). + */ +static struct blk_crypto_attr __blk_crypto_mode_attrs[BLK_ENCRYPTION_MODE_MAX]; +static struct attribute *blk_crypto_mode_attrs[BLK_ENCRYPTION_MODE_MAX + 1]; + +static umode_t blk_crypto_mode_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct blk_crypto_profile *profile = kobj_to_crypto_profile(kobj); + struct blk_crypto_attr *a = attr_to_crypto_attr(attr); + int mode_num = a - __blk_crypto_mode_attrs; + + if (profile->modes_supported[mode_num]) + return 0444; + return 0; +} + +static ssize_t blk_crypto_mode_show(struct blk_crypto_profile *profile, + struct blk_crypto_attr *attr, char *page) +{ + int mode_num = attr - __blk_crypto_mode_attrs; + + return sysfs_emit(page, "0x%x\n", profile->modes_supported[mode_num]); +} + +static const struct attribute_group blk_crypto_modes_attr_group = { + .name = "modes", + .attrs = blk_crypto_mode_attrs, + .is_visible = blk_crypto_mode_is_visible, +}; + +static const struct attribute_group *blk_crypto_attr_groups[] = { + &blk_crypto_attr_group, + &blk_crypto_modes_attr_group, + NULL, +}; + +static ssize_t blk_crypto_attr_show(struct kobject *kobj, + struct attribute *attr, char *page) +{ + struct blk_crypto_profile *profile = kobj_to_crypto_profile(kobj); + struct blk_crypto_attr *a = attr_to_crypto_attr(attr); + + return a->show(profile, a, page); +} + +static const struct sysfs_ops blk_crypto_attr_ops = { + .show = blk_crypto_attr_show, +}; + +static void blk_crypto_release(struct kobject *kobj) +{ + kfree(container_of(kobj, struct blk_crypto_kobj, kobj)); +} + +static struct kobj_type blk_crypto_ktype = { + .default_groups = blk_crypto_attr_groups, + .sysfs_ops = &blk_crypto_attr_ops, + .release = blk_crypto_release, +}; + +/* + * If the request_queue has a blk_crypto_profile, create the "crypto" + * subdirectory in sysfs (/sys/block/$disk/queue/crypto/). 
+ */
+int blk_crypto_sysfs_register(struct request_queue *q)
+{
+	struct blk_crypto_kobj *obj;
+	int err;
+
+	if (!q->crypto_profile)
+		return 0;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return -ENOMEM;
+	obj->profile = q->crypto_profile;
+
+	err = kobject_init_and_add(&obj->kobj, &blk_crypto_ktype, &q->kobj,
+				   "crypto");
+	if (err) {
+		kobject_put(&obj->kobj);
+		return err;
+	}
+	q->crypto_kobject = &obj->kobj;
+	return 0;
+}
+
+void blk_crypto_sysfs_unregister(struct request_queue *q)
+{
+	kobject_put(q->crypto_kobject);
+}
+
+static int __init blk_crypto_sysfs_init(void)
+{
+	int i;
+
+	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);
+	for (i = 1; i < BLK_ENCRYPTION_MODE_MAX; i++) {
+		struct blk_crypto_attr *attr = &__blk_crypto_mode_attrs[i];
+
+		attr->attr.name = blk_crypto_modes[i].name;
+		attr->attr.mode = 0444;
+		attr->show = blk_crypto_mode_show;
+		blk_crypto_mode_attrs[i - 1] = &attr->attr;
+	}
+	return 0;
+}
+subsys_initcall(blk_crypto_sysfs_init);
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 773dae4c329b..a496aaef85ba 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -19,16 +19,19 @@
 const struct blk_crypto_mode blk_crypto_modes[] = {
 	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
+		.name = "AES-256-XTS",
 		.cipher_str = "xts(aes)",
 		.keysize = 64,
 		.ivsize = 16,
 	},
 	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
+		.name = "AES-128-CBC-ESSIV",
 		.cipher_str = "essiv(cbc(aes),sha256)",
 		.keysize = 16,
 		.ivsize = 16,
 	},
 	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
+		.name = "Adiantum",
 		.cipher_str = "adiantum(xchacha12,aes)",
 		.keysize = 32,
 		.ivsize = 32,
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b38ef1bbf389..241ded62f458 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -880,6 +880,10 @@ int blk_register_queue(struct gendisk *disk)
 		goto put_dev;
 	}
 
+	ret = blk_crypto_sysfs_register(q);
+	if (ret)
+		goto put_dev;
+
 	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
 	wbt_enable_default(q);
 	blk_throtl_register_queue(q);
@@ -910,6 +914,7 @@ unlock:
 	return ret;
 
 put_dev:
+	elv_unregister_queue(q);
 	disk_unregister_independent_access_ranges(disk);
 	mutex_unlock(&q->sysfs_lock);
 	mutex_unlock(&q->sysfs_dir_lock);
@@ -954,6 +959,7 @@ void blk_unregister_queue(struct gendisk *disk)
 	 */
 	if (queue_is_mq(q))
 		blk_mq_unregister_dev(disk_to_dev(disk), q);
+	blk_crypto_sysfs_unregister(q);
 	blk_trace_remove_sysfs(disk_to_dev(disk));
 
 	mutex_lock(&q->sysfs_lock);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f757f9c2871f..e19947d84f12 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -413,6 +413,7 @@ struct request_queue {
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct blk_crypto_profile *crypto_profile;
+	struct kobject *crypto_kobject;
 #endif
 
 	unsigned int rq_timeout;
-- cgit v1.2.3-71-gd317


From 25d490eb46486e88c16e64d9eb7cfd33a642d596 Mon Sep 17 00:00:00 2001
From: Wang Kefeng 
Date: Sat, 18 Dec 2021 09:30:40 +0100
Subject: ARM: 9172/1: amba: Cleanup amba pclk operation

pl330.c is the only user of amba_pclk_[un]prepare(), so call
clk_[un]prepare() directly there. After this, all of the amba pclk
helper functions (enable, disable, prepare and unprepare) can be
killed.
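
As an illustration (a sketch, not part of this patch), an out-of-tree
driver that used the removed wrappers would open-code them against the
bus clock in the same way, since each helper was a one-line wrapper
around the clk API:

	/* hypothetical driver code; 'adev' is its struct amba_device */
	ret = clk_prepare(adev->pclk);		/* was amba_pclk_prepare(adev) */
	if (!ret)
		ret = clk_enable(adev->pclk);	/* was amba_pclk_enable(adev) */
	/* ... device in use ... */
	clk_disable(adev->pclk);		/* was amba_pclk_disable(adev) */
	clk_unprepare(adev->pclk);		/* was amba_pclk_unprepare(adev) */
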
Acked-by: Vinod Koul 
Signed-off-by: Kefeng Wang 
Signed-off-by: Russell King (Oracle) 
---
 drivers/dma/pl330.c      |  4 ++--
 include/linux/amba/bus.h | 20 --------------------
 2 files changed, 2 insertions(+), 22 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 110de8a60058..858400e42ec0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2968,7 +2968,7 @@ static int __maybe_unused pl330_suspend(struct device *dev)
 	struct amba_device *pcdev = to_amba_device(dev);
 
 	pm_runtime_force_suspend(dev);
-	amba_pclk_unprepare(pcdev);
+	clk_unprepare(pcdev->pclk);
 
 	return 0;
 }
@@ -2978,7 +2978,7 @@ static int __maybe_unused pl330_resume(struct device *dev)
 	struct amba_device *pcdev = to_amba_device(dev);
 	int ret;
 
-	ret = amba_pclk_prepare(pcdev);
+	ret = clk_prepare(pcdev->pclk);
 	if (ret)
 		return ret;
 
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 6c7f47846971..09174970b855 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -121,26 +121,6 @@ struct amba_device *amba_find_device(const char *, struct device *, unsigned int
 int amba_request_regions(struct amba_device *, const char *);
 void amba_release_regions(struct amba_device *);
 
-static inline int amba_pclk_enable(struct amba_device *dev)
-{
-	return clk_enable(dev->pclk);
-}
-
-static inline void amba_pclk_disable(struct amba_device *dev)
-{
-	clk_disable(dev->pclk);
-}
-
-static inline int amba_pclk_prepare(struct amba_device *dev)
-{
-	return clk_prepare(dev->pclk);
-}
-
-static inline void amba_pclk_unprepare(struct amba_device *dev)
-{
-	clk_unprepare(dev->pclk);
-}
-
 /* Some drivers don't use the struct amba_device */
 #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
 #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
-- cgit v1.2.3-71-gd317


From dacf3ca134d0dc105caee77651a349a86bd77456 Mon Sep 17 00:00:00 2001
From: Wang Kefeng 
Date: Sat, 18 Dec 2021 09:30:41 +0100
Subject: ARM: 9173/1: amba: kill amba_find_match()

No one uses amba_find_device() any more, so kill it along with its
internal helper amba_find_match().

Signed-off-by: Kefeng Wang 
Signed-off-by: Russell King (Oracle) 
---
 drivers/amba/bus.c       | 61 ------------------------------------------------
 include/linux/amba/bus.h |  1 -
 2 files changed, 62 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e1a5eca3ae3c..dd0ef65e5c3a 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -669,66 +669,6 @@ void amba_device_unregister(struct amba_device *dev)
 	device_unregister(&dev->dev);
 }
 
-
-struct find_data {
-	struct amba_device *dev;
-	struct device *parent;
-	const char *busid;
-	unsigned int id;
-	unsigned int mask;
-};
-
-static int amba_find_match(struct device *dev, void *data)
-{
-	struct find_data *d = data;
-	struct amba_device *pcdev = to_amba_device(dev);
-	int r;
-
-	r = (pcdev->periphid & d->mask) == d->id;
-	if (d->parent)
-		r &= d->parent == dev->parent;
-	if (d->busid)
-		r &= strcmp(dev_name(dev), d->busid) == 0;
-
-	if (r) {
-		get_device(dev);
-		d->dev = pcdev;
-	}
-
-	return r;
-}
-
-/**
- * amba_find_device - locate an AMBA device given a bus id
- * @busid: bus id for device (or NULL)
- * @parent: parent device (or NULL)
- * @id: peripheral ID (or 0)
- * @mask: peripheral ID mask (or 0)
- *
- * Return the AMBA device corresponding to the supplied parameters.
- * If no device matches, returns NULL.
- *
- * NOTE: When a valid device is found, its refcount is
- * incremented, and must be decremented before the returned
- * reference.
- */ -struct amba_device * -amba_find_device(const char *busid, struct device *parent, unsigned int id, - unsigned int mask) -{ - struct find_data data; - - data.dev = NULL; - data.parent = parent; - data.busid = busid; - data.id = id; - data.mask = mask; - - bus_for_each_dev(&amba_bustype, NULL, &data, amba_find_match); - - return data.dev; -} - /** * amba_request_regions - request all mem regions associated with device * @dev: amba_device structure for device @@ -768,6 +708,5 @@ EXPORT_SYMBOL(amba_driver_register); EXPORT_SYMBOL(amba_driver_unregister); EXPORT_SYMBOL(amba_device_register); EXPORT_SYMBOL(amba_device_unregister); -EXPORT_SYMBOL(amba_find_device); EXPORT_SYMBOL(amba_request_regions); EXPORT_SYMBOL(amba_release_regions); diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 09174970b855..6562f543c3e0 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -117,7 +117,6 @@ void amba_device_put(struct amba_device *); int amba_device_add(struct amba_device *, struct resource *); int amba_device_register(struct amba_device *, struct resource *); void amba_device_unregister(struct amba_device *); -struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int); int amba_request_regions(struct amba_device *, const char *); void amba_release_regions(struct amba_device *); -- cgit v1.2.3-71-gd317 From 1a93b82c59ab45f2d9f14c47f09d2e341ff02381 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 18 Feb 2022 07:07:08 -0500 Subject: NFS: constify nfs_server_capable() and nfs_have_writebacks() Signed-off-by: Trond Myklebust --- include/linux/nfs_fs.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 72a732a5103c..6e10725887d1 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -363,7 +363,7 @@ static inline void nfs_mark_for_revalidate(struct inode *inode) spin_unlock(&inode->i_lock); } -static inline int nfs_server_capable(struct inode *inode, int cap) +static inline int nfs_server_capable(const struct inode *inode, int cap) { return NFS_SERVER(inode)->caps & cap; } @@ -587,12 +587,11 @@ extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); extern void nfs_commit_free(struct nfs_commit_data *data); bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); -static inline int -nfs_have_writebacks(struct inode *inode) +static inline bool nfs_have_writebacks(const struct inode *inode) { if (S_ISREG(inode->i_mode)) return atomic_long_read(&NFS_I(inode)->nrequests) != 0; - return 0; + return false; } /* -- cgit v1.2.3-71-gd317 From a9ff2e99e9fa501ec965da03c18a5422b37a2f44 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 25 Jan 2022 10:17:59 -0500 Subject: SUNRPC: Remove the .svo_enqueue_xprt method We have never been able to track down and address the underlying cause of the performance issues with workqueue-based service support. svo_enqueue_xprt is called multiple times per RPC, so it adds instruction path length, but always ends up at the same function: svc_xprt_do_enqueue(). We do not anticipate needing this flexibility for dynamic nfsd thread management support. As a micro-optimization, remove .svo_enqueue_xprt because Spectre/Meltdown makes virtual function calls more costly. This change essentially reverts commit b9e13cdfac70 ("nfsd/sunrpc: turn enqueueing a svc_xprt into a svc_serv operation"). 
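
To illustrate the effect, the enqueue path after this patch reaches
svc_xprt_do_enqueue() through a direct call instead of through the
ops table (a sketch condensed from the hunks below):

	void svc_xprt_enqueue(struct svc_xprt *xprt)
	{
		if (test_bit(XPT_BUSY, &xprt->xpt_flags))
			return;
		svc_xprt_do_enqueue(xprt);	/* direct call, no indirect branch */
	}
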
Signed-off-by: Chuck Lever --- fs/lockd/svc.c | 1 - fs/nfs/callback.c | 2 -- fs/nfsd/nfssvc.c | 1 - include/linux/sunrpc/svc.h | 3 --- include/linux/sunrpc/svc_xprt.h | 1 - net/sunrpc/svc_xprt.c | 10 +++++----- 6 files changed, 5 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 0475c5a5d061..3a05af873625 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -353,7 +353,6 @@ static struct notifier_block lockd_inet6addr_notifier = { static const struct svc_serv_ops lockd_sv_ops = { .svo_shutdown = svc_rpcb_cleanup, .svo_function = lockd, - .svo_enqueue_xprt = svc_xprt_do_enqueue, .svo_module = THIS_MODULE, }; diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 054cc1255fac..7a810f885063 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -234,13 +234,11 @@ err_bind: static const struct svc_serv_ops nfs40_cb_sv_ops = { .svo_function = nfs4_callback_svc, - .svo_enqueue_xprt = svc_xprt_do_enqueue, .svo_module = THIS_MODULE, }; #if defined(CONFIG_NFS_V4_1) static const struct svc_serv_ops nfs41_cb_sv_ops = { .svo_function = nfs41_callback_svc, - .svo_enqueue_xprt = svc_xprt_do_enqueue, .svo_module = THIS_MODULE, }; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index b8c682b62d29..aeeac6de1f0a 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -615,7 +615,6 @@ static int nfsd_get_default_max_blksize(void) static const struct svc_serv_ops nfsd_thread_sv_ops = { .svo_shutdown = nfsd_last_thread, .svo_function = nfsd, - .svo_enqueue_xprt = svc_xprt_do_enqueue, .svo_module = THIS_MODULE, }; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index f35c22b3355f..6ef9c1cafd0b 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -61,9 +61,6 @@ struct svc_serv_ops { /* function for service threads to run */ int (*svo_function)(void *); - /* queue up a transport for servicing */ - void (*svo_enqueue_xprt)(struct svc_xprt *); - /* optional module to count when adding threads. * Thread function must call module_put_and_kthread_exit() to exit. 
*/ diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 571f605bc91e..a3ba027fb4ba 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -131,7 +131,6 @@ int svc_create_xprt(struct svc_serv *, const char *, struct net *, const int, const unsigned short, int, const struct cred *); void svc_xprt_received(struct svc_xprt *xprt); -void svc_xprt_do_enqueue(struct svc_xprt *xprt); void svc_xprt_enqueue(struct svc_xprt *xprt); void svc_xprt_put(struct svc_xprt *xprt); void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index b21ad7994147..9fce4f7774bb 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -32,6 +32,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp); static struct cache_deferred_req *svc_defer(struct cache_req *req); static void svc_age_temp_xprts(struct timer_list *t); static void svc_delete_xprt(struct svc_xprt *xprt); +static void svc_xprt_do_enqueue(struct svc_xprt *xprt); /* apparently the "standard" is that clients close * idle connections after 5 minutes, servers after @@ -266,12 +267,12 @@ void svc_xprt_received(struct svc_xprt *xprt) } /* As soon as we clear busy, the xprt could be closed and - * 'put', so we need a reference to call svc_enqueue_xprt with: + * 'put', so we need a reference to call svc_xprt_do_enqueue with: */ svc_xprt_get(xprt); smp_mb__before_atomic(); clear_bit(XPT_BUSY, &xprt->xpt_flags); - xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt); + svc_xprt_do_enqueue(xprt); svc_xprt_put(xprt); } EXPORT_SYMBOL_GPL(svc_xprt_received); @@ -423,7 +424,7 @@ static bool svc_xprt_ready(struct svc_xprt *xprt) return false; } -void svc_xprt_do_enqueue(struct svc_xprt *xprt) +static void svc_xprt_do_enqueue(struct svc_xprt *xprt) { struct svc_pool *pool; struct svc_rqst *rqstp = NULL; @@ -467,7 +468,6 @@ out_unlock: put_cpu(); trace_svc_xprt_enqueue(xprt, rqstp); } -EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); /* * Queue up a transport with data pending. If there are idle nfsd @@ -478,7 +478,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) { if (test_bit(XPT_BUSY, &xprt->xpt_flags)) return; - xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt); + svc_xprt_do_enqueue(xprt); } EXPORT_SYMBOL_GPL(svc_xprt_enqueue); -- cgit v1.2.3-71-gd317 From 87cdd8641c8a1ec6afd2468265e20840a57fd888 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 25 Jan 2022 13:49:29 -0500 Subject: SUNRPC: Remove svo_shutdown method Clean up. Neil observed that "any code that calls svc_shutdown_net() knows what the shutdown function should be, and so can call it directly." Signed-off-by: Chuck Lever Reviewed-by: NeilBrown --- fs/lockd/svc.c | 5 ++--- fs/nfsd/nfssvc.c | 2 +- include/linux/sunrpc/svc.h | 3 --- net/sunrpc/svc.c | 3 --- 4 files changed, 3 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 3a05af873625..f5b688a844aa 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -249,6 +249,7 @@ out_err: printk(KERN_WARNING "lockd_up: makesock failed, error=%d\n", err); svc_shutdown_net(serv, net); + svc_rpcb_cleanup(serv, net); return err; } @@ -287,8 +288,7 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net) cancel_delayed_work_sync(&ln->grace_period_end); locks_end_grace(&ln->lockd_manager); svc_shutdown_net(serv, net); - dprintk("%s: per-net data destroyed; net=%x\n", - __func__, net->ns.inum); + svc_rpcb_cleanup(serv, net); } } else { pr_err("%s: no users! 
net=%x\n", @@ -351,7 +351,6 @@ static struct notifier_block lockd_inet6addr_notifier = { #endif static const struct svc_serv_ops lockd_sv_ops = { - .svo_shutdown = svc_rpcb_cleanup, .svo_function = lockd, .svo_module = THIS_MODULE, }; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index aeeac6de1f0a..0c6b216e439e 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -613,7 +613,6 @@ static int nfsd_get_default_max_blksize(void) } static const struct svc_serv_ops nfsd_thread_sv_ops = { - .svo_shutdown = nfsd_last_thread, .svo_function = nfsd, .svo_module = THIS_MODULE, }; @@ -724,6 +723,7 @@ void nfsd_put(struct net *net) if (kref_put(&nn->nfsd_serv->sv_refcnt, nfsd_noop)) { svc_shutdown_net(nn->nfsd_serv, net); + nfsd_last_thread(nn->nfsd_serv, net); svc_destroy(&nn->nfsd_serv->sv_refcnt); spin_lock(&nfsd_notifier_lock); nn->nfsd_serv = NULL; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 6ef9c1cafd0b..63794d772eb3 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -55,9 +55,6 @@ struct svc_pool { struct svc_serv; struct svc_serv_ops { - /* Callback to use when last thread exits. */ - void (*svo_shutdown)(struct svc_serv *, struct net *); - /* function for service threads to run */ int (*svo_function)(void *); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 2aabec2b4bec..74a75a22da9a 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -539,9 +539,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled); void svc_shutdown_net(struct svc_serv *serv, struct net *net) { svc_close_net(serv, net); - - if (serv->sv_ops->svo_shutdown) - serv->sv_ops->svo_shutdown(serv, net); } EXPORT_SYMBOL_GPL(svc_shutdown_net); -- cgit v1.2.3-71-gd317 From 352ad31448fecc78a2e9b78da64eea5d63b8d0ce Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 26 Jan 2022 11:42:08 -0500 Subject: SUNRPC: Rename svc_create_xprt() Clean up: Use the "svc_xprt_" function naming convention as is used for other external APIs. 
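
Caller-side the conversion is mechanical; for example (a sketch
mirroring the lockd hunk below, with error handling elided):

	err = svc_xprt_create(serv, "tcp", net, PF_INET, port,
			      SVC_SOCK_DEFAULTS, cred);
	if (err < 0)
		goto out_err;	/* e.g. -EPROTONOSUPPORT */
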
Signed-off-by: Chuck Lever --- fs/lockd/svc.c | 4 ++-- fs/nfs/callback.c | 12 ++++++------ fs/nfsd/nfsctl.c | 8 ++++---- fs/nfsd/nfssvc.c | 8 ++++---- include/linux/sunrpc/svc_xprt.h | 7 ++++--- net/sunrpc/svc_xprt.c | 24 +++++++++++++++++++----- 6 files changed, 39 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index f5b688a844aa..bba6f2b45b64 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -197,8 +197,8 @@ static int create_lockd_listener(struct svc_serv *serv, const char *name, xprt = svc_find_xprt(serv, name, net, family, 0); if (xprt == NULL) - return svc_create_xprt(serv, name, net, family, port, - SVC_SOCK_DEFAULTS, cred); + return svc_xprt_create(serv, name, net, family, port, + SVC_SOCK_DEFAULTS, cred); svc_xprt_put(xprt); return 0; } diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 7a810f885063..c1a8767100ae 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -45,18 +45,18 @@ static int nfs4_callback_up_net(struct svc_serv *serv, struct net *net) int ret; struct nfs_net *nn = net_generic(net, nfs_net_id); - ret = svc_create_xprt(serv, "tcp", net, PF_INET, - nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS, - cred); + ret = svc_xprt_create(serv, "tcp", net, PF_INET, + nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS, + cred); if (ret <= 0) goto out_err; nn->nfs_callback_tcpport = ret; dprintk("NFS: Callback listener port = %u (af %u, net %x)\n", nn->nfs_callback_tcpport, PF_INET, net->ns.inum); - ret = svc_create_xprt(serv, "tcp", net, PF_INET6, - nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS, - cred); + ret = svc_xprt_create(serv, "tcp", net, PF_INET6, + nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS, + cred); if (ret > 0) { nn->nfs_callback_tcpport6 = ret; dprintk("NFS: Callback listener port = %u (af %u, net %x)\n", diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 68b020f2002b..8fec779994f7 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -772,13 +772,13 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr if (err != 0) return err; - err = svc_create_xprt(nn->nfsd_serv, transport, net, - PF_INET, port, SVC_SOCK_ANONYMOUS, cred); + err = svc_xprt_create(nn->nfsd_serv, transport, net, + PF_INET, port, SVC_SOCK_ANONYMOUS, cred); if (err < 0) goto out_err; - err = svc_create_xprt(nn->nfsd_serv, transport, net, - PF_INET6, port, SVC_SOCK_ANONYMOUS, cred); + err = svc_xprt_create(nn->nfsd_serv, transport, net, + PF_INET6, port, SVC_SOCK_ANONYMOUS, cred); if (err < 0 && err != -EAFNOSUPPORT) goto out_close; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 0c6b216e439e..ae25b7b3af99 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -293,13 +293,13 @@ static int nfsd_init_socks(struct net *net, const struct cred *cred) if (!list_empty(&nn->nfsd_serv->sv_permsocks)) return 0; - error = svc_create_xprt(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT, - SVC_SOCK_DEFAULTS, cred); + error = svc_xprt_create(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT, + SVC_SOCK_DEFAULTS, cred); if (error < 0) return error; - error = svc_create_xprt(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT, - SVC_SOCK_DEFAULTS, cred); + error = svc_xprt_create(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT, + SVC_SOCK_DEFAULTS, cred); if (error < 0) return error; diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index a3ba027fb4ba..a7f6f17c3dc5 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -127,9 +127,10 @@ int svc_reg_xprt_class(struct 
svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, struct svc_serv *); -int svc_create_xprt(struct svc_serv *, const char *, struct net *, - const int, const unsigned short, int, - const struct cred *); +int svc_xprt_create(struct svc_serv *serv, const char *xprt_name, + struct net *net, const int family, + const unsigned short port, int flags, + const struct cred *cred); void svc_xprt_received(struct svc_xprt *xprt); void svc_xprt_enqueue(struct svc_xprt *xprt); void svc_xprt_put(struct svc_xprt *xprt); diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 1c2295209d08..44be7193cd9b 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -285,7 +285,7 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new) svc_xprt_received(new); } -static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name, +static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name, struct net *net, const int family, const unsigned short port, int flags, const struct cred *cred) @@ -321,21 +321,35 @@ static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name, return -EPROTONOSUPPORT; } -int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, +/** + * svc_xprt_create - Add a new listener to @serv + * @serv: target RPC service + * @xprt_name: transport class name + * @net: network namespace + * @family: network address family + * @port: listener port + * @flags: SVC_SOCK flags + * @cred: credential to bind to this transport + * + * Return values: + * %0: New listener added successfully + * %-EPROTONOSUPPORT: Requested transport type not supported + */ +int svc_xprt_create(struct svc_serv *serv, const char *xprt_name, struct net *net, const int family, const unsigned short port, int flags, const struct cred *cred) { int err; - err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred); + err = _svc_xprt_create(serv, xprt_name, net, family, port, flags, cred); if (err == -EPROTONOSUPPORT) { request_module("svc%s", xprt_name); - err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred); + err = _svc_xprt_create(serv, xprt_name, net, family, port, flags, cred); } return err; } -EXPORT_SYMBOL_GPL(svc_create_xprt); +EXPORT_SYMBOL_GPL(svc_xprt_create); /* * Copy the local and remote xprt addresses to the rqstp structure -- cgit v1.2.3-71-gd317 From 4355d767a21b9445958fc11bce9a9701f76529d3 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 31 Jan 2022 13:34:29 -0500 Subject: SUNRPC: Rename svc_close_xprt() Clean up: Use the "svc_xprt_" function naming convention as is used for other external APIs. 
Signed-off-by: Chuck Lever --- fs/nfsd/nfsctl.c | 2 +- include/linux/sunrpc/svc_xprt.h | 2 +- net/sunrpc/svc.c | 2 +- net/sunrpc/svc_xprt.c | 9 +++++++-- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 2 +- 5 files changed, 11 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 8fec779994f7..16920e4512bd 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -790,7 +790,7 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr out_close: xprt = svc_find_xprt(nn->nfsd_serv, transport, net, PF_INET, port); if (xprt != NULL) { - svc_close_xprt(xprt); + svc_xprt_close(xprt); svc_xprt_put(xprt); } out_err: diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index a7f6f17c3dc5..bf7d029fb48c 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -135,7 +135,7 @@ void svc_xprt_received(struct svc_xprt *xprt); void svc_xprt_enqueue(struct svc_xprt *xprt); void svc_xprt_put(struct svc_xprt *xprt); void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); -void svc_close_xprt(struct svc_xprt *xprt); +void svc_xprt_close(struct svc_xprt *xprt); int svc_port_is_privileged(struct sockaddr *sin); int svc_print_xprts(char *buf, int maxlen); struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 74a75a22da9a..53efef3db3a9 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1352,7 +1352,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) svc_authorise(rqstp); close_xprt: if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) - svc_close_xprt(rqstp->rq_xprt); + svc_xprt_close(rqstp->rq_xprt); dprintk("svc: svc_process close\n"); return 0; diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 44be7193cd9b..6809116c996a 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -1068,7 +1068,12 @@ static void svc_delete_xprt(struct svc_xprt *xprt) svc_xprt_put(xprt); } -void svc_close_xprt(struct svc_xprt *xprt) +/** + * svc_xprt_close - Close a client connection + * @xprt: transport to disconnect + * + */ +void svc_xprt_close(struct svc_xprt *xprt) { trace_svc_xprt_close(xprt); set_bit(XPT_CLOSE, &xprt->xpt_flags); @@ -1083,7 +1088,7 @@ void svc_close_xprt(struct svc_xprt *xprt) */ svc_delete_xprt(xprt); } -EXPORT_SYMBOL_GPL(svc_close_xprt); +EXPORT_SYMBOL_GPL(svc_xprt_close); static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net) { diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 16897fcb659c..85c8cdda98b1 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -198,7 +198,7 @@ static int xprt_rdma_bc_send_request(struct rpc_rqst *rqst) ret = rpcrdma_bc_send_request(rdma, rqst); if (ret == -ENOTCONN) - svc_close_xprt(sxprt); + svc_xprt_close(sxprt); return ret; } -- cgit v1.2.3-71-gd317 From c7d7ec8f043e53ad16e30f5ebb8b9df415ec0f2b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 26 Jan 2022 11:30:55 -0500 Subject: SUNRPC: Remove svc_shutdown_net() Clean up: svc_shutdown_net() now does nothing but call svc_close_net(). Replace all external call sites. svc_close_net() is renamed to be the inverse of svc_xprt_create(). 
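
Callers therefore pair the generic transport teardown with their own
shutdown logic explicitly, e.g. (a sketch based on the lockd hunk
below):

	svc_xprt_destroy_all(serv, net);
	svc_rpcb_cleanup(serv, net);	/* caller-specific cleanup */
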
Signed-off-by: Chuck Lever --- fs/lockd/svc.c | 4 ++-- fs/nfs/callback.c | 2 +- fs/nfsd/nfssvc.c | 2 +- include/linux/sunrpc/svc.h | 1 - include/linux/sunrpc/svc_xprt.h | 1 + net/sunrpc/svc.c | 6 ------ net/sunrpc/svc_xprt.c | 9 +++++++-- 7 files changed, 12 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index bba6f2b45b64..c83ec4a375bc 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -248,7 +248,7 @@ out_err: if (warned++ == 0) printk(KERN_WARNING "lockd_up: makesock failed, error=%d\n", err); - svc_shutdown_net(serv, net); + svc_xprt_destroy_all(serv, net); svc_rpcb_cleanup(serv, net); return err; } @@ -287,7 +287,7 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net) nlm_shutdown_hosts_net(net); cancel_delayed_work_sync(&ln->grace_period_end); locks_end_grace(&ln->lockd_manager); - svc_shutdown_net(serv, net); + svc_xprt_destroy_all(serv, net); svc_rpcb_cleanup(serv, net); } } else { diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index c1a8767100ae..c98c68513590 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -189,7 +189,7 @@ static void nfs_callback_down_net(u32 minorversion, struct svc_serv *serv, struc return; dprintk("NFS: destroy per-net callback data; net=%x\n", net->ns.inum); - svc_shutdown_net(serv, net); + svc_xprt_destroy_all(serv, net); } static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index ae25b7b3af99..b92d272f4ba6 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -722,7 +722,7 @@ void nfsd_put(struct net *net) struct nfsd_net *nn = net_generic(net, nfsd_net_id); if (kref_put(&nn->nfsd_serv->sv_refcnt, nfsd_noop)) { - svc_shutdown_net(nn->nfsd_serv, net); + svc_xprt_destroy_all(nn->nfsd_serv, net); nfsd_last_thread(nn->nfsd_serv, net); svc_destroy(&nn->nfsd_serv->sv_refcnt); spin_lock(&nfsd_notifier_lock); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 63794d772eb3..5603158b2aa7 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -508,7 +508,6 @@ struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, const struct svc_serv_ops *); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); int svc_pool_stats_open(struct svc_serv *serv, struct file *file); -void svc_shutdown_net(struct svc_serv *, struct net *); int svc_process(struct svc_rqst *); int bc_svc_process(struct svc_serv *, struct rpc_rqst *, struct svc_rqst *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index bf7d029fb48c..42e113742429 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -131,6 +131,7 @@ int svc_xprt_create(struct svc_serv *serv, const char *xprt_name, struct net *net, const int family, const unsigned short port, int flags, const struct cred *cred); +void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net); void svc_xprt_received(struct svc_xprt *xprt); void svc_xprt_enqueue(struct svc_xprt *xprt); void svc_xprt_put(struct svc_xprt *xprt); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 53efef3db3a9..08d684746452 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -536,12 +536,6 @@ out_err: } EXPORT_SYMBOL_GPL(svc_create_pooled); -void svc_shutdown_net(struct svc_serv *serv, struct net *net) -{ - svc_close_net(serv, net); -} -EXPORT_SYMBOL_GPL(svc_shutdown_net); - /* * Destroy an RPC service. 
Should be called with appropriate locking to * protect sv_permsocks and sv_tempsocks. diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 6809116c996a..0c117d3bfda8 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -1140,7 +1140,11 @@ static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net) } } -/* +/** + * svc_xprt_destroy_all - Destroy transports associated with @serv + * @serv: RPC service to be shut down + * @net: target network namespace + * * Server threads may still be running (especially in the case where the * service is still running in other network namespaces). * @@ -1152,7 +1156,7 @@ static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net) * threads, we may need to wait a little while and then check again to * see if they're done. */ -void svc_close_net(struct svc_serv *serv, struct net *net) +void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net) { int delay = 0; @@ -1163,6 +1167,7 @@ void svc_close_net(struct svc_serv *serv, struct net *net) msleep(delay++); } } +EXPORT_SYMBOL_GPL(svc_xprt_destroy_all); /* * Handle defer and revisit of requests -- cgit v1.2.3-71-gd317 From f49169c97fceb21ad6a0aaf671c50b0f520f15a5 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 16 Feb 2022 12:31:09 -0500 Subject: NFSD: Remove svc_serv_ops::svo_module struct svc_serv_ops is about to be removed. Neil Brown says: > I suspect svo_module can go as well - I don't think the thread is > ever the thing that primarily keeps a module active. A random sample of kthread_create() callers shows sunrpc is the only one that manages module reference count in this way. Suggested-by: Neil Brown Signed-off-by: Chuck Lever --- fs/lockd/svc.c | 4 +--- fs/nfs/callback.c | 7 ++----- fs/nfs/nfs4state.c | 1 - fs/nfsd/nfssvc.c | 3 --- include/linux/sunrpc/svc.h | 5 ----- kernel/module.c | 2 +- net/sunrpc/svc.c | 2 -- 7 files changed, 4 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index c83ec4a375bc..bfde31124f3a 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -184,8 +184,7 @@ lockd(void *vrqstp) dprintk("lockd_down: service stopped\n"); svc_exit_thread(rqstp); - - module_put_and_kthread_exit(0); + return 0; } static int create_lockd_listener(struct svc_serv *serv, const char *name, @@ -352,7 +351,6 @@ static struct notifier_block lockd_inet6addr_notifier = { static const struct svc_serv_ops lockd_sv_ops = { .svo_function = lockd, - .svo_module = THIS_MODULE, }; static int lockd_get(void) diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index c98c68513590..a494f9e7bd0a 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -92,8 +91,8 @@ nfs4_callback_svc(void *vrqstp) continue; svc_process(rqstp); } + svc_exit_thread(rqstp); - module_put_and_kthread_exit(0); return 0; } @@ -136,8 +135,8 @@ nfs41_callback_svc(void *vrqstp) finish_wait(&serv->sv_cb_waitq, &wq); } } + svc_exit_thread(rqstp); - module_put_and_kthread_exit(0); return 0; } @@ -234,12 +233,10 @@ err_bind: static const struct svc_serv_ops nfs40_cb_sv_ops = { .svo_function = nfs4_callback_svc, - .svo_module = THIS_MODULE, }; #if defined(CONFIG_NFS_V4_1) static const struct svc_serv_ops nfs41_cb_sv_ops = { .svo_function = nfs41_callback_svc, - .svo_module = THIS_MODULE, }; static const struct svc_serv_ops *nfs4_cb_sv_ops[] = { diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index f5a62c0d999b..02a899e4390f 100644 --- a/fs/nfs/nfs4state.c +++ 
b/fs/nfs/nfs4state.c @@ -2697,6 +2697,5 @@ static int nfs4_run_state_manager(void *ptr) allow_signal(SIGKILL); nfs4_state_manager(clp); nfs_put_client(clp); - module_put_and_kthread_exit(0); return 0; } diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index b92d272f4ba6..544187a8a22b 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -614,7 +614,6 @@ static int nfsd_get_default_max_blksize(void) static const struct svc_serv_ops nfsd_thread_sv_ops = { .svo_function = nfsd, - .svo_module = THIS_MODULE, }; void nfsd_shutdown_threads(struct net *net) @@ -1018,8 +1017,6 @@ out: msleep(20); } - /* Release module */ - module_put_and_kthread_exit(0); return 0; } diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 5603158b2aa7..dfc9283f412f 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -57,11 +57,6 @@ struct svc_serv; struct svc_serv_ops { /* function for service threads to run */ int (*svo_function)(void *); - - /* optional module to count when adding threads. - * Thread function must call module_put_and_kthread_exit() to exit. - */ - struct module *svo_module; }; /* diff --git a/kernel/module.c b/kernel/module.c index 46a5c2ed1928..6cea788fd965 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -335,7 +335,7 @@ static inline void add_taint_module(struct module *mod, unsigned flag, /* * A thread that wants to hold a reference to a module only while it - * is running can call this to safely exit. nfsd and lockd use this. + * is running can call this to safely exit. */ void __noreturn __module_put_and_kthread_exit(struct module *mod, long code) { diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 08d684746452..a90d555aa163 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -736,11 +736,9 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) if (IS_ERR(rqstp)) return PTR_ERR(rqstp); - __module_get(serv->sv_ops->svo_module); task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp, node, "%s", serv->sv_name); if (IS_ERR(task)) { - module_put(serv->sv_ops->svo_module); svc_exit_thread(rqstp); return PTR_ERR(task); } -- cgit v1.2.3-71-gd317 From 37902c6313090235c847af89c5515591261ee338 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 16 Feb 2022 12:16:27 -0500 Subject: NFSD: Move svc_serv_ops::svo_function into struct svc_serv Hoist svo_function back into svc_serv and remove struct svc_serv_ops, since the struct is now devoid of fields. 
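
A service now hands its thread function straight to the constructor;
for instance (a sketch; my_program and my_threadfn are placeholder
names, not from this patch):

	serv = svc_create(&my_program, bufsize, my_threadfn);
	if (!serv)
		return -ENOMEM;
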
Signed-off-by: Chuck Lever --- fs/lockd/svc.c | 6 +----- fs/nfs/callback.c | 43 +++++++++++-------------------------------- fs/nfsd/nfssvc.c | 7 +------ include/linux/sunrpc/svc.h | 14 ++++---------- net/sunrpc/svc.c | 37 ++++++++++++++++++++++++++----------- 5 files changed, 43 insertions(+), 64 deletions(-) (limited to 'include/linux') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index bfde31124f3a..59ef8a1f843f 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -349,10 +349,6 @@ static struct notifier_block lockd_inet6addr_notifier = { }; #endif -static const struct svc_serv_ops lockd_sv_ops = { - .svo_function = lockd, -}; - static int lockd_get(void) { struct svc_serv *serv; @@ -376,7 +372,7 @@ static int lockd_get(void) nlm_timeout = LOCKD_DFLT_TIMEO; nlmsvc_timeout = nlm_timeout * HZ; - serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, &lockd_sv_ops); + serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, lockd); if (!serv) { printk(KERN_WARNING "lockd_up: create service failed\n"); return -ENOMEM; diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index a494f9e7bd0a..456af7d230cf 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -231,29 +231,10 @@ err_bind: return ret; } -static const struct svc_serv_ops nfs40_cb_sv_ops = { - .svo_function = nfs4_callback_svc, -}; -#if defined(CONFIG_NFS_V4_1) -static const struct svc_serv_ops nfs41_cb_sv_ops = { - .svo_function = nfs41_callback_svc, -}; - -static const struct svc_serv_ops *nfs4_cb_sv_ops[] = { - [0] = &nfs40_cb_sv_ops, - [1] = &nfs41_cb_sv_ops, -}; -#else -static const struct svc_serv_ops *nfs4_cb_sv_ops[] = { - [0] = &nfs40_cb_sv_ops, - [1] = NULL, -}; -#endif - static struct svc_serv *nfs_callback_create_svc(int minorversion) { struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; - const struct svc_serv_ops *sv_ops; + int (*threadfn)(void *data); struct svc_serv *serv; /* @@ -262,17 +243,6 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion) if (cb_info->serv) return svc_get(cb_info->serv); - switch (minorversion) { - case 0: - sv_ops = nfs4_cb_sv_ops[0]; - break; - default: - sv_ops = nfs4_cb_sv_ops[1]; - } - - if (sv_ops == NULL) - return ERR_PTR(-ENOTSUPP); - /* * Sanity check: if there's no task, * we should be the first user ... 
@@ -281,7 +251,16 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion) printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n", cb_info->users); - serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops); + threadfn = nfs4_callback_svc; +#if defined(CONFIG_NFS_V4_1) + if (minorversion) + threadfn = nfs41_callback_svc; +#else + if (minorversion) + return ERR_PTR(-ENOTSUPP); +#endif + serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, + threadfn); if (!serv) { printk(KERN_ERR "nfs_callback_create_svc: create service failed\n"); return ERR_PTR(-ENOMEM); diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 544187a8a22b..5abbe5d1c77f 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -612,10 +612,6 @@ static int nfsd_get_default_max_blksize(void) return ret; } -static const struct svc_serv_ops nfsd_thread_sv_ops = { - .svo_function = nfsd, -}; - void nfsd_shutdown_threads(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); @@ -654,8 +650,7 @@ int nfsd_create_serv(struct net *net) if (nfsd_max_blksize == 0) nfsd_max_blksize = nfsd_get_default_max_blksize(); nfsd_reset_versions(nn); - serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, - &nfsd_thread_sv_ops); + serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd); if (serv == NULL) return -ENOMEM; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index dfc9283f412f..a5dda4987e8b 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -52,13 +52,6 @@ struct svc_pool { unsigned long sp_flags; } ____cacheline_aligned_in_smp; -struct svc_serv; - -struct svc_serv_ops { - /* function for service threads to run */ - int (*svo_function)(void *); -}; - /* * RPC service. * @@ -91,7 +84,8 @@ struct svc_serv { unsigned int sv_nrpools; /* number of thread pools */ struct svc_pool * sv_pools; /* array of thread pools */ - const struct svc_serv_ops *sv_ops; /* server operations */ + int (*sv_threadfn)(void *data); + #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct list_head sv_cb_list; /* queue for callback requests * that arrive over the same @@ -492,7 +486,7 @@ int svc_rpcb_setup(struct svc_serv *serv, struct net *net); void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); int svc_bind(struct svc_serv *serv, struct net *net); struct svc_serv *svc_create(struct svc_program *, unsigned int, - const struct svc_serv_ops *); + int (*threadfn)(void *data)); struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node); void svc_rqst_replace_page(struct svc_rqst *rqstp, @@ -500,7 +494,7 @@ void svc_rqst_replace_page(struct svc_rqst *rqstp, void svc_rqst_free(struct svc_rqst *); void svc_exit_thread(struct svc_rqst *); struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, - const struct svc_serv_ops *); + int (*threadfn)(void *data)); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); int svc_pool_stats_open(struct svc_serv *serv, struct file *file); int svc_process(struct svc_rqst *); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index a90d555aa163..557004017548 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -448,7 +448,7 @@ __svc_init_bc(struct svc_serv *serv) */ static struct svc_serv * __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, - const struct svc_serv_ops *ops) + int (*threadfn)(void *data)) { struct svc_serv *serv; unsigned int vers; @@ -465,7 +465,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, 
int npools, bufsize = RPCSVC_MAXPAYLOAD; serv->sv_max_payload = bufsize? bufsize : 4096; serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE); - serv->sv_ops = ops; + serv->sv_threadfn = threadfn; xdrsize = 0; while (prog) { prog->pg_lovers = prog->pg_nvers-1; @@ -511,22 +511,37 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, return serv; } -struct svc_serv * -svc_create(struct svc_program *prog, unsigned int bufsize, - const struct svc_serv_ops *ops) +/** + * svc_create - Create an RPC service + * @prog: the RPC program the new service will handle + * @bufsize: maximum message size for @prog + * @threadfn: a function to service RPC requests for @prog + * + * Returns an instantiated struct svc_serv object or NULL. + */ +struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize, + int (*threadfn)(void *data)) { - return __svc_create(prog, bufsize, /*npools*/1, ops); + return __svc_create(prog, bufsize, 1, threadfn); } EXPORT_SYMBOL_GPL(svc_create); -struct svc_serv * -svc_create_pooled(struct svc_program *prog, unsigned int bufsize, - const struct svc_serv_ops *ops) +/** + * svc_create_pooled - Create an RPC service with pooled threads + * @prog: the RPC program the new service will handle + * @bufsize: maximum message size for @prog + * @threadfn: a function to service RPC requests for @prog + * + * Returns an instantiated struct svc_serv object or NULL. + */ +struct svc_serv *svc_create_pooled(struct svc_program *prog, + unsigned int bufsize, + int (*threadfn)(void *data)) { struct svc_serv *serv; unsigned int npools = svc_pool_map_get(); - serv = __svc_create(prog, bufsize, npools, ops); + serv = __svc_create(prog, bufsize, npools, threadfn); if (!serv) goto out_err; return serv; @@ -736,7 +751,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) if (IS_ERR(rqstp)) return PTR_ERR(rqstp); - task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp, + task = kthread_create_on_node(serv->sv_threadfn, rqstp, node, "%s", serv->sv_name); if (IS_ERR(task)) { svc_exit_thread(rqstp); -- cgit v1.2.3-71-gd317 From 74aaf96feaca80285912cc6f19575b3e97177918 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 22 Feb 2022 13:10:52 -0500 Subject: SUNRPC: Teach server to recognize RPC_AUTH_TLS Initial support for the RPC_AUTH_TLS authentication flavor enables NFSD to eventually accept an RPC_AUTH_TLS probe from clients. This patch simply prevents NFSD from rejecting these probes completely. In the meantime, graft this support in now so that RPC_AUTH_TLS support keeps up with generic code and API changes in the RPC server. Down the road, server-side transport implementations will populate xpo_start_tls when they can support RPC-with-TLS. For example, TCP will eventually populate it, but RDMA won't. 
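
A transport class that gains TLS support would advertise it by
populating the new callback in its ops vector, roughly (a hypothetical
sketch; my_sock_start_tls is a placeholder and does not exist yet):

	static const struct svc_xprt_ops my_svc_tcp_ops = {
		/* ... other methods unchanged ... */
		.xpo_start_tls	= my_sock_start_tls,
	};

With the callback present, svcauth_tls_accept() answers the client's
probe with the 8-byte "STARTTLS" opaque verifier; otherwise the reply
verifier body is empty.
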
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_xprt.h | 1 + net/sunrpc/svcauth.c | 2 ++ net/sunrpc/svcauth_unix.c | 60 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 42e113742429..20068ccfd0cc 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -28,6 +28,7 @@ struct svc_xprt_ops { void (*xpo_free)(struct svc_xprt *); void (*xpo_secure_port)(struct svc_rqst *rqstp); void (*xpo_kill_temp_xprt)(struct svc_xprt *); + void (*xpo_start_tls)(struct svc_xprt *); }; struct svc_xprt_class { diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 5a8b8e03fdd4..e72ba2f13f6c 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -31,10 +31,12 @@ */ extern struct auth_ops svcauth_null; extern struct auth_ops svcauth_unix; +extern struct auth_ops svcauth_tls; static struct auth_ops __rcu *authtab[RPC_AUTH_MAXFLAVOR] = { [RPC_AUTH_NULL] = (struct auth_ops __force __rcu *)&svcauth_null, [RPC_AUTH_UNIX] = (struct auth_ops __force __rcu *)&svcauth_unix, + [RPC_AUTH_TLS] = (struct auth_ops __force __rcu *)&svcauth_tls, }; static struct auth_ops * diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index d7ed7d49115a..b1efc34db6ed 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -37,6 +37,7 @@ struct unix_domain { extern struct auth_ops svcauth_null; extern struct auth_ops svcauth_unix; +extern struct auth_ops svcauth_tls; static void svcauth_unix_domain_release_rcu(struct rcu_head *head) { @@ -788,6 +789,65 @@ struct auth_ops svcauth_null = { }; +static int +svcauth_tls_accept(struct svc_rqst *rqstp) +{ + struct svc_cred *cred = &rqstp->rq_cred; + struct kvec *argv = rqstp->rq_arg.head; + struct kvec *resv = rqstp->rq_res.head; + + if (argv->iov_len < XDR_UNIT * 3) + return SVC_GARBAGE; + + /* Call's cred length */ + if (svc_getu32(argv) != xdr_zero) { + rqstp->rq_auth_stat = rpc_autherr_badcred; + return SVC_DENIED; + } + + /* Call's verifier flavor and its length */ + if (svc_getu32(argv) != rpc_auth_null || + svc_getu32(argv) != xdr_zero) { + rqstp->rq_auth_stat = rpc_autherr_badverf; + return SVC_DENIED; + } + + /* AUTH_TLS is not valid on non-NULL procedures */ + if (rqstp->rq_proc != 0) { + rqstp->rq_auth_stat = rpc_autherr_badcred; + return SVC_DENIED; + } + + /* Mapping to nobody uid/gid is required */ + cred->cr_uid = INVALID_UID; + cred->cr_gid = INVALID_GID; + cred->cr_group_info = groups_alloc(0); + if (cred->cr_group_info == NULL) + return SVC_CLOSE; /* kmalloc failure - client must retry */ + + /* Reply's verifier */ + svc_putnl(resv, RPC_AUTH_NULL); + if (rqstp->rq_xprt->xpt_ops->xpo_start_tls) { + svc_putnl(resv, 8); + memcpy(resv->iov_base + resv->iov_len, "STARTTLS", 8); + resv->iov_len += 8; + } else + svc_putnl(resv, 0); + + rqstp->rq_cred.cr_flavor = RPC_AUTH_TLS; + return SVC_OK; +} + +struct auth_ops svcauth_tls = { + .name = "tls", + .owner = THIS_MODULE, + .flavour = RPC_AUTH_TLS, + .accept = svcauth_tls_accept, + .release = svcauth_null_release, + .set_client = svcauth_unix_set_client, +}; + + static int svcauth_unix_accept(struct svc_rqst *rqstp) { -- cgit v1.2.3-71-gd317 From 797bd4d41c8b41afc10fad39146ac9558cb39e94 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Thu, 24 Feb 2022 10:55:54 +0100 Subject: tty: serial: define UART_LCR_WLEN() macro Define a generic UART_LCR_WLEN() macro with a size argument. 
It can be used to encode byte size into an LCR value. Therefore we can use it to simplify the drivers using tty_get_char_size() in the next patches. Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20220224095558.30929-1-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- include/linux/serial.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/serial.h b/include/linux/serial.h index 0916107c77f9..0b8b7d7c8f33 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h @@ -12,6 +12,8 @@ #include #include +/* Helper for dealing with UART_LCR_WLEN* defines */ +#define UART_LCR_WLEN(x) ((x) - 5) /* * Counters of the input lines (CTS, DSR, RI, CD) interrupts -- cgit v1.2.3-71-gd317 From 17a8f31bba7bac8cce4bd12bab50697da96e7710 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 28 Feb 2022 04:18:05 +0100 Subject: netfilter: egress: silence egress hook lockdep splats Netfilter assumes it is called with rcu_read_lock held, but in the egress hook case it may be called with only the BH read lock. This triggers a lockdep splat. To avoid changing every rcu_dereference() to rcu_dereference_check(..., rcu_read_lock_bh_held()), wrap nf_hook_slow() in an rcu_read_lock/unlock pair. Reported-by: Eric Dumazet Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_netdev.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h index b4dd96e4dc8d..e6487a691136 100644 --- a/include/linux/netfilter_netdev.h +++ b/include/linux/netfilter_netdev.h @@ -101,7 +101,11 @@ static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc, nf_hook_state_init(&state, NF_NETDEV_EGRESS, NFPROTO_NETDEV, dev, NULL, NULL, dev_net(dev), NULL); + + /* nf assumes rcu_read_lock, not just read_lock_bh */ + rcu_read_lock(); ret = nf_hook_slow(skb, &state, e, 0); + rcu_read_unlock(); if (ret == 1) { return skb; -- cgit v1.2.3-71-gd317 From dcfd5192563909219f6304b4e3e10db071158eef Mon Sep 17 00:00:00 2001 From: Alyssa Rosenzweig Date: Tue, 15 Feb 2022 13:46:51 -0500 Subject: soc: mediatek: mtk-infracfg: Disable ACP on MT8192 MT8192 contains an experimental Accelerator Coherency Port implementation, which does not work correctly but was unintentionally enabled by default. For correct operation of the GPU, we must set a chicken bit disabling ACP on MT8192. Adapted from the following downstream change to the out-of-tree, legacy Mali GPU driver: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/2781271/5 Note this change is required for both Panfrost and the legacy kernel driver. 
Co-developed-by: Robin Murphy Signed-off-by: Robin Murphy Signed-off-by: Alyssa Rosenzweig Cc: Nick Fan Cc: Nicolas Boichat Cc: Chen-Yu Tsai Cc: Stephen Boyd Cc: AngeloGioacchino Del Regno Tested-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20220215184651.12168-1-alyssa.rosenzweig@collabora.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-infracfg.c | 19 +++++++++++++++++++ include/linux/soc/mediatek/infracfg.h | 3 +++ 2 files changed, 22 insertions(+) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c index 0590b68e0d78..2acf19676af2 100644 --- a/drivers/soc/mediatek/mtk-infracfg.c +++ b/drivers/soc/mediatek/mtk-infracfg.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -72,3 +73,21 @@ int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask, return ret; } + +static int __init mtk_infracfg_init(void) +{ + struct regmap *infracfg; + + /* + * MT8192 has an experimental path to route GPU traffic to the DSU's + * Accelerator Coherency Port, which is inadvertently enabled by + * default. It turns out not to work, so disable it to prevent spurious + * GPU faults. + */ + infracfg = syscon_regmap_lookup_by_compatible("mediatek,mt8192-infracfg"); + if (!IS_ERR(infracfg)) + regmap_set_bits(infracfg, MT8192_INFRA_CTRL, + MT8192_INFRA_CTRL_DISABLE_MFG2ACP); + return 0; +} +postcore_initcall(mtk_infracfg_init); diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 8a1c2040a28e..50804ac748bd 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -277,6 +277,9 @@ #define INFRA_TOPAXI_PROTECTEN_SET 0x0260 #define INFRA_TOPAXI_PROTECTEN_CLR 0x0264 +#define MT8192_INFRA_CTRL 0x290 +#define MT8192_INFRA_CTRL_DISABLE_MFG2ACP BIT(9) + #define REG_INFRA_MISC 0xf00 #define F_DDR_4GB_SUPPORT_EN BIT(13) -- cgit v1.2.3-71-gd317 From cb66b9ba5cdacab5592bce31a4083fc4ac172ea5 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 28 Feb 2022 22:58:28 -0800 Subject: Input: add input_copy_abs() function Add a new helper function to copy absinfo from one input_dev to another input_dev. This is useful to e.g. setup a pen/stylus input-device for combined touchscreen/pen hardware where the pen uses the same coordinates as the touchscreen. Suggested-by: Dmitry Torokhov Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20220131143539.109142-2-hdegoede@redhat.com Signed-off-by: Dmitry Torokhov --- drivers/input/input.c | 36 ++++++++++++++++++++++++++++++++++++ include/linux/input.h | 2 ++ 2 files changed, 38 insertions(+) (limited to 'include/linux') diff --git a/drivers/input/input.c b/drivers/input/input.c index 3a5156012fb8..4456e82d370b 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -526,6 +526,42 @@ void input_set_abs_params(struct input_dev *dev, unsigned int axis, } EXPORT_SYMBOL(input_set_abs_params); +/** + * input_copy_abs - Copy absinfo from one input_dev to another + * @dst: Destination input device to copy the abs settings to + * @dst_axis: ABS_* value selecting the destination axis + * @src: Source input device to copy the abs settings from + * @src_axis: ABS_* value selecting the source axis + * + * Set absinfo for the selected destination axis by copying it from + * the specified source input device's source axis. + * This is useful to e.g. 
setup a pen/stylus input-device for combined + * touchscreen/pen hardware where the pen uses the same coordinates as + * the touchscreen. + */ +void input_copy_abs(struct input_dev *dst, unsigned int dst_axis, + const struct input_dev *src, unsigned int src_axis) +{ + /* src must have EV_ABS and src_axis set */ + if (WARN_ON(!(test_bit(EV_ABS, src->evbit) && + test_bit(src_axis, src->absbit)))) + return; + + /* + * input_alloc_absinfo() may have failed for the source. Our caller is + * expected to catch this when registering the input devices, which may + * happen after the input_copy_abs() call. + */ + if (!src->absinfo) + return; + + input_set_capability(dst, EV_ABS, dst_axis); + if (!dst->absinfo) + return; + + dst->absinfo[dst_axis] = src->absinfo[src_axis]; +} +EXPORT_SYMBOL(input_copy_abs); /** * input_grab_device - grabs device for exclusive use diff --git a/include/linux/input.h b/include/linux/input.h index 0354b298d874..49790c1bd2c4 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -475,6 +475,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even void input_alloc_absinfo(struct input_dev *dev); void input_set_abs_params(struct input_dev *dev, unsigned int axis, int min, int max, int fuzz, int flat); +void input_copy_abs(struct input_dev *dst, unsigned int dst_axis, + const struct input_dev *src, unsigned int src_axis); #define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \ static inline int input_abs_get_##_suffix(struct input_dev *dev, \ -- cgit v1.2.3-71-gd317 From 50bb467c9e76743fbc8441d29113cdad62dbc4fe Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Fri, 18 Feb 2022 09:38:58 +0000 Subject: rfkill: define rfill_soft_blocked() if !RFKILL If CONFIG_RFKILL is not set, the Intel WiFi driver will not build the iw_mvm driver part due to the missing rfill_soft_blocked() call. Adding a inline declaration of rfill_soft_blocked() if CONFIG_RFKILL=n fixes the following error: drivers/net/wireless/intel/iwlwifi/mvm/mvm.h: In function 'iwl_mvm_mei_set_sw_rfkill_state': drivers/net/wireless/intel/iwlwifi/mvm/mvm.h:2215:38: error: implicit declaration of function 'rfkill_soft_blocked'; did you mean 'rfkill_blocked'? [-Werror=implicit-function-declaration] 2215 | mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false; | ^~~~~~~~~~~~~~~~~~~ | rfkill_blocked Signed-off-by: Ben Dooks Reported-by: Neill Whillans Fixes: 5bc9a9dd7535 ("rfkill: allow to get the software rfkill state") Link: https://lore.kernel.org/r/20220218093858.1245677-1-ben.dooks@codethink.co.uk Signed-off-by: Johannes Berg --- include/linux/rfkill.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index c35f3962dc4f..373003ace639 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -308,6 +308,11 @@ static inline bool rfkill_blocked(struct rfkill *rfkill) return false; } +static inline bool rfkill_soft_blocked(struct rfkill *rfkill) +{ + return false; +} + static inline enum rfkill_type rfkill_find_type(const char *name) { return RFKILL_TYPE_ALL; -- cgit v1.2.3-71-gd317 From 2f6f66ccd21e854cd3743bc8def68f8b4e7d91fc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 25 Feb 2022 18:22:44 +0000 Subject: KVM: Drop kvm_reload_remote_mmus(), open code request in x86 users Remove the generic kvm_reload_remote_mmus() and open code its functionality into the two x86 callers. 
x86 is (obviously) the only architecture that uses the hook, and is also the only architecture that uses KVM_REQ_MMU_RELOAD in a way that's consistent with the name. That will change in a future patch: when zapping a single shadow page, x86 doesn't actually _need_ to reload all vCPUs' MMUs; only MMUs whose root is being zapped need to be reloaded. s390 also uses KVM_REQ_MMU_RELOAD, but for a slightly different purpose. Drop the generic code in anticipation of implementing s390 and x86 arch specific requests, which will allow dropping KVM_REQ_MMU_RELOAD entirely. Opportunistically reword the x86 TDP MMU comment to avoid making references to functions (and requests!) when possible, and to remove the rather ambiguous "this". No functional change intended. Cc: Ben Gardon Signed-off-by: Sean Christopherson Reviewed-by: Ben Gardon Message-Id: <20220225182248.3812651-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 14 +++++++------- include/linux/kvm_host.h | 1 - virt/kvm/kvm_main.c | 5 ----- 3 files changed, 7 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b2c1c4eb6007..32c6d4b33d03 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2353,7 +2353,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, * treats invalid shadow pages as being obsolete. */ if (!is_obsolete_sp(kvm, sp)) - kvm_reload_remote_mmus(kvm); + kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); } if (sp->lpage_disallowed) @@ -5639,11 +5639,11 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) */ kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; - /* In order to ensure all threads see this change when * handling the MMU reload signal, this must happen in the * same critical section as kvm_reload_remote_mmus, and * before kvm_zap_obsolete_pages as kvm_zap_obsolete_pages * could drop the MMU lock and yield. + /* + * In order to ensure all vCPUs drop their soon-to-be invalid roots, + * invalidating TDP MMU roots must be done while holding mmu_lock for + * write and in the same critical section as making the reload request, + * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield. */ if (is_tdp_mmu_enabled(kvm)) kvm_tdp_mmu_invalidate_all_roots(kvm); @@ -5656,7 +5656,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) * Note: we need to do this under the protection of mmu_lock, * otherwise, vcpu would purge shadow page but miss tlb flush. 
*/ - kvm_reload_remote_mmus(kvm); + kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); kvm_zap_obsolete_pages(kvm); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f11039944c08..0aeb47cffd43 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1325,7 +1325,6 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); void kvm_flush_remote_tlbs(struct kvm *kvm); -void kvm_reload_remote_mmus(struct kvm *kvm); #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index cdf1fa3c60ae..64eb99444688 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -354,11 +354,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm) EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); #endif -void kvm_reload_remote_mmus(struct kvm *kvm) -{ - kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); -} - #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, gfp_t gfp_flags) -- cgit v1.2.3-71-gd317 From e65a3b46b5b1fab92c3273bcf71e028a4d307400 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 25 Feb 2022 18:22:47 +0000 Subject: KVM: Drop KVM_REQ_MMU_RELOAD and update vcpu-requests.rst documentation Remove the now unused KVM_REQ_MMU_RELOAD, shift KVM_REQ_VM_DEAD into the unoccupied space, and update vcpu-requests.rst, which was missing an entry for KVM_REQ_VM_DEAD. Switching KVM_REQ_VM_DEAD to entry '1' also fixes the stale comment about bits 4-7 being reserved. Reviewed-by: Claudio Imbrenda Signed-off-by: Sean Christopherson Reviewed-by: Ben Gardon Message-Id: <20220225182248.3812651-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/vcpu-requests.rst | 7 +++---- include/linux/kvm_host.h | 3 +-- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst index ad2915ef7020..b61d48aec36c 100644 --- a/Documentation/virt/kvm/vcpu-requests.rst +++ b/Documentation/virt/kvm/vcpu-requests.rst @@ -112,11 +112,10 @@ KVM_REQ_TLB_FLUSH choose to use the common kvm_flush_remote_tlbs() implementation will need to handle this VCPU request. -KVM_REQ_MMU_RELOAD +KVM_REQ_VM_DEAD - When shadow page tables are used and memory slots are removed it's - necessary to inform each VCPU to completely refresh the tables. This - request is used for that. + This request informs all VCPUs that the VM is dead and unusable, e.g. due to + fatal error or because the VM's state has been intentionally destroyed. KVM_REQ_UNBLOCK diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0aeb47cffd43..9536ffa0473b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -153,10 +153,9 @@ static inline bool is_error_page(struct page *page) * Bits 4-7 are reserved for more arch-independent bits. 
*/ #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 -#define KVM_REQ_VM_DEAD (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 -- cgit v1.2.3-71-gd317 From e45f1c1d70cae0d7a28ad60f9c6391e210354f0b Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Tue, 1 Mar 2022 11:07:35 +0200 Subject: interconnect: Add stubs for the bulk API Add stub functions for the bulk API to allow compile testing. Reviewed-by: Alex Elder Link: https://lore.kernel.org/r/20220301090735.26599-1-djakov@kernel.org Signed-off-by: Georgi Djakov --- include/linux/interconnect.h | 36 +++++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index f2dd2fc8d3cd..f685777b875e 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -38,13 +38,6 @@ struct icc_bulk_data { u32 peak_bw; }; -int __must_check of_icc_bulk_get(struct device *dev, int num_paths, - struct icc_bulk_data *paths); -void icc_bulk_put(int num_paths, struct icc_bulk_data *paths); -int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths); -int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths); -void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths); - #if IS_ENABLED(CONFIG_INTERCONNECT) struct icc_path *icc_get(struct device *dev, const int src_id, @@ -58,6 +51,12 @@ int icc_disable(struct icc_path *path); int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); void icc_set_tag(struct icc_path *path, u32 tag); const char *icc_get_name(struct icc_path *path); +int __must_check of_icc_bulk_get(struct device *dev, int num_paths, + struct icc_bulk_data *paths); +void icc_bulk_put(int num_paths, struct icc_bulk_data *paths); +int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths); +int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths); +void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths); #else @@ -112,6 +111,29 @@ static inline const char *icc_get_name(struct icc_path *path) return NULL; } +static inline int of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths) +{ + return 0; +} + +static inline void icc_bulk_put(int num_paths, struct icc_bulk_data *paths) +{ +} + +static inline int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths) +{ + return 0; +} + +static inline int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths) +{ + return 0; +} + +static inline void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths) +{ +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_H */ -- cgit v1.2.3-71-gd317 From 1c1813a743fe84d0dcf53743baa4edc1f74c44c8 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Thu, 3 Feb 2022 15:32:15 +0100 Subject: HID: core: statically allocate read buffers This is a preparation patch for rethinking the generic processing of HID reports. We can actually pre-allocate all of our memory instead of dynamically allocating/freeing it whenever we parse a report. 
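To make the layout concrete, here is a minimal userspace sketch (illustrative only; struct and field names loosely mirror hid_register_field() after this change) of the single-allocation trick: one zeroed block holds the struct followed by the usage array and both value arrays, so report parsing needs no further allocations:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct usage { uint32_t hid; };

    struct field {
        struct usage *usage;    /* trailing array #1 */
        int32_t *value;         /* last known value(s) */
        int32_t *new_value;     /* scratch buffer for newly read value(s) */
    };

    static struct field *field_alloc(unsigned int usages)
    {
        /* one allocation for the struct and all three arrays */
        struct field *f = calloc(1, sizeof(*f) +
                                 usages * sizeof(struct usage) +
                                 2 * usages * sizeof(int32_t));
        if (!f)
            return NULL;
        f->usage = (struct usage *)(f + 1);
        f->value = (int32_t *)(f->usage + usages);
        f->new_value = f->value + usages;
        return f;
    }

    int main(void)
    {
        struct field *f = field_alloc(4);

        if (!f)
            return 1;
        f->new_value[3] = 42;                         /* parse into scratch */
        memcpy(f->value, f->new_value, 4 * sizeof(int32_t));
        printf("%d\n", f->value[3]);                  /* prints 42 */
        free(f);
        return 0;
    }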
Signed-off-by: Benjamin Tissoires Reviewed-by: Ping Cheng Acked-by: Peter Hutterer Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 12 +++++------- include/linux/hid.h | 1 + 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f1aed5bbd000..75e7b8447bf7 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -101,7 +101,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned field = kzalloc((sizeof(struct hid_field) + usages * sizeof(struct hid_usage) + - usages * sizeof(unsigned)), GFP_KERNEL); + 2 * usages * sizeof(unsigned int)), GFP_KERNEL); if (!field) return NULL; @@ -109,6 +109,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned report->field[field->index] = field; field->usage = (struct hid_usage *)(field + 1); field->value = (s32 *)(field->usage + usages); + field->new_value = (s32 *)(field->value + usages); field->report = report; return field; @@ -1541,9 +1542,8 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field, __s32 max = field->logical_maximum; __s32 *value; - value = kmalloc_array(count, sizeof(__s32), GFP_ATOMIC); - if (!value) - return; + value = field->new_value; + memset(value, 0, count * sizeof(__s32)); for (n = 0; n < count; n++) { @@ -1557,7 +1557,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field, value[n] >= min && value[n] <= max && value[n] - min < field->maxusage && field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) - goto exit; + return; } for (n = 0; n < count; n++) { @@ -1581,8 +1581,6 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field, } memcpy(field->value, value, count * sizeof(__s32)); -exit: - kfree(value); } /* diff --git a/include/linux/hid.h b/include/linux/hid.h index 7487b0586fe6..3fbfe0986659 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -476,6 +476,7 @@ struct hid_field { unsigned report_count; /* number of this field in the report */ unsigned report_type; /* (input,output,feature) */ __s32 *value; /* last known value(s) */ + __s32 *new_value; /* newly read value(s) */ __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; -- cgit v1.2.3-71-gd317 From b79c1abae5e19726c5060749e4e7c9e426b045c8 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Thu, 3 Feb 2022 15:32:17 +0100 Subject: HID: core: split data fetching from processing in hid_input_field() This is a preparatory patch for being able to process the usages out of order. We split the retrieval of the data in a separate function and also split out the processing of the usages depending if the field is an array or a variable. No functional changes from this patch. Signed-off-by: Benjamin Tissoires Reviewed-by: Ping Cheng Acked-by: Peter Hutterer Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 96 ++++++++++++++++++++++++++++++++++++++++---------- include/linux/hid.h | 3 +- 2 files changed, 79 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index fad4dbdf6391..34188d7ac0f7 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1544,13 +1544,12 @@ static inline int hid_array_value_is_valid(struct hid_field *field, } /* - * Analyse a received field, and fetch the data from it. The field - * content is stored for next report processing (we do differential - * reporting to the layer). 
+ * Fetch the field from the data. The field content is stored for next + * report processing (we do differential reporting to the layer). */ - -static void hid_input_field(struct hid_device *hid, struct hid_field *field, - __u8 *data, int interrupt) +static void hid_input_fetch_field(struct hid_device *hid, + struct hid_field *field, + __u8 *data) { unsigned n; unsigned count = field->report_count; @@ -1561,6 +1560,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field, value = field->new_value; memset(value, 0, count * sizeof(__s32)); + field->ignored = false; for (n = 0; n < count; n++) { @@ -1572,21 +1572,56 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field, /* Ignore report if ErrorRollOver */ if (!(field->flags & HID_MAIN_ITEM_VARIABLE) && hid_array_value_is_valid(field, value[n]) && - field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) + field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) { + field->ignored = true; return; + } } +} - for (n = 0; n < count; n++) { +/* + * Process a received variable field. + */ - if (HID_MAIN_ITEM_VARIABLE & field->flags) { - hid_process_event(hid, - field, - &field->usage[n], - value[n], - interrupt); - continue; - } +static void hid_input_var_field(struct hid_device *hid, + struct hid_field *field, + int interrupt) +{ + unsigned int count = field->report_count; + __s32 *value = field->new_value; + unsigned int n; + + for (n = 0; n < count; n++) + hid_process_event(hid, + field, + &field->usage[n], + value[n], + interrupt); + + memcpy(field->value, value, count * sizeof(__s32)); +} +/* + * Process a received array field. The field content is stored for + * next report processing (we do differential reporting to the layer). + */ + +static void hid_input_array_field(struct hid_device *hid, + struct hid_field *field, + int interrupt) +{ + unsigned int n; + unsigned int count = field->report_count; + __s32 min = field->logical_minimum; + __s32 *value; + + value = field->new_value; + + /* ErrorRollOver */ + if (field->ignored) + return; + + for (n = 0; n < count; n++) { if (hid_array_value_is_valid(field, field->value[n]) && search(value, field->value[n], count)) hid_process_event(hid, @@ -1607,6 +1642,31 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field, memcpy(field->value, value, count * sizeof(__s32)); } +/* + * Analyse a received report, and fetch the data from it. The field + * content is stored for next report processing (we do differential + * reporting to the layer). + */ +static void hid_process_report(struct hid_device *hid, + struct hid_report *report, + __u8 *data, + int interrupt) +{ + unsigned int a; + struct hid_field *field; + + for (a = 0; a < report->maxfield; a++) { + field = report->field[a]; + + hid_input_fetch_field(hid, field, data); + + if (field->flags & HID_MAIN_ITEM_VARIABLE) + hid_input_var_field(hid, field, interrupt); + else + hid_input_array_field(hid, field, interrupt); + } +} + /* * Output the field into the report. 
*/ @@ -1768,7 +1828,6 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, struct hid_report_enum *report_enum = hid->report_enum + type; struct hid_report *report; struct hid_driver *hdrv; - unsigned int a; u32 rsize, csize = size; u8 *cdata = data; int ret = 0; @@ -1804,8 +1863,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, } if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) { - for (a = 0; a < report->maxfield; a++) - hid_input_field(hid, report->field[a], cdata, interrupt); + hid_process_report(hid, report, cdata, interrupt); hdrv = hid->driver; if (hdrv && hdrv->report) hdrv->report(hid, report); diff --git a/include/linux/hid.h b/include/linux/hid.h index 3fbfe0986659..cf79eb3da465 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -342,7 +342,7 @@ struct hid_item { * HID device quirks. */ -/* +/* * Increase this if you need to configure more HID quirks at module load time */ #define MAX_USBHID_BOOT_QUIRKS 4 @@ -483,6 +483,7 @@ struct hid_field { __s32 physical_maximum; __s32 unit_exponent; unsigned unit; + bool ignored; /* this field is ignored in this event */ struct hid_report *report; /* associated report */ unsigned index; /* index into report->field[] */ /* hidinput data */ -- cgit v1.2.3-71-gd317 From 22f4b026c3ddd4b26c5baa202bd3ee38feaa2e9a Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Thu, 3 Feb 2022 15:32:21 +0100 Subject: HID: compute an ordered list of input fields to process This will be used in a later commit: we build a list of input fields (and usage_index) that is ordered based on a usage priority. Changing the usage priority allows us to re-order the processed list, meaning that we can enforce some usages to be processed before others. For instance, before processing InRange on HID tablets, we need to know if we are using the eraser (side or button). Enforcing a higher priority for Invert (by placing it earlier in the list) allows us to force the input stack to process that field first. 
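As a quick sanity check of the scheme, here is a userspace sketch of the priority computation that hid-input adopts a couple of patches further down (digitizer usage codes are the standard page values; illustrative only): the first entry in the static list receives the highest value, shifted left by 8 bits to leave room below for driver-defined bits:

    #include <stdio.h>

    #define HID_DG_INVERT  0x000d003c
    #define HID_DG_INRANGE 0x000d0032

    static const unsigned int prios[] = { HID_DG_INVERT, HID_DG_INRANGE };
    #define NPRIOS (sizeof(prios) / sizeof(prios[0]))

    static unsigned int usage_priority(unsigned int hid)
    {
        unsigned int i;

        for (i = 0; i < NPRIOS; i++)
            if (hid == prios[i])
                return (NPRIOS - i) << 8;  /* earlier entry, larger value */
        return 0;  /* unlisted usages keep the lowest priority */
    }

    int main(void)
    {
        printf("Invert:  0x%x\n", usage_priority(HID_DG_INVERT));  /* 0x200 */
        printf("InRange: 0x%x\n", usage_priority(HID_DG_INRANGE)); /* 0x100 */
        return 0;
    }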
Signed-off-by: Benjamin Tissoires Reviewed-by: Ping Cheng Acked-by: Peter Hutterer Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/hid.h | 10 +++++ 2 files changed, 114 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 34188d7ac0f7..b38528118642 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -81,6 +81,7 @@ struct hid_report *hid_register_report(struct hid_device *device, report_enum->report_id_hash[id] = report; list_add_tail(&report->list, &report_enum->report_list); + INIT_LIST_HEAD(&report->field_entry_list); return report; } @@ -101,7 +102,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned field = kzalloc((sizeof(struct hid_field) + usages * sizeof(struct hid_usage) + - 2 * usages * sizeof(unsigned int)), GFP_KERNEL); + 3 * usages * sizeof(unsigned int)), GFP_KERNEL); if (!field) return NULL; @@ -110,6 +111,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned field->usage = (struct hid_usage *)(field + 1); field->value = (s32 *)(field->usage + usages); field->new_value = (s32 *)(field->value + usages); + field->usages_priorities = (s32 *)(field->new_value + usages); field->report = report; return field; @@ -657,6 +659,8 @@ static void hid_free_report(struct hid_report *report) { unsigned n; + kfree(report->field_entries); + for (n = 0; n < report->maxfield; n++) kfree(report->field[n]); kfree(report); @@ -1667,6 +1671,103 @@ static void hid_process_report(struct hid_device *hid, } } +/* + * Insert a given usage_index in a field in the list + * of processed usages in the report. + * + * The elements of lower priority score are processed + * first. 
+ */ +static void __hid_insert_field_entry(struct hid_device *hid, + struct hid_report *report, + struct hid_field_entry *entry, + struct hid_field *field, + unsigned int usage_index) +{ + struct hid_field_entry *next; + + entry->field = field; + entry->index = usage_index; + entry->priority = field->usages_priorities[usage_index]; + + /* insert the element at the correct position */ + list_for_each_entry(next, + &report->field_entry_list, + list) { + /* + * the priority of our element is strictly higher + * than the next one, insert it before + */ + if (entry->priority > next->priority) { + list_add_tail(&entry->list, &next->list); + return; + } + } + + /* lowest priority score: insert at the end */ + list_add_tail(&entry->list, &report->field_entry_list); +} + +static void hid_report_process_ordering(struct hid_device *hid, + struct hid_report *report) +{ + struct hid_field *field; + struct hid_field_entry *entries; + unsigned int a, u, usages; + unsigned int count = 0; + + /* count the number of individual fields in the report */ + for (a = 0; a < report->maxfield; a++) { + field = report->field[a]; + + if (field->flags & HID_MAIN_ITEM_VARIABLE) + count += field->report_count; + else + count++; + } + + /* allocate the memory to process the fields */ + entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); + if (!entries) + return; + + report->field_entries = entries; + + /* + * walk through all fields in the report and + * store them by priority order in report->field_entry_list + * + * - Var elements are individualized (field + usage_index) + * - Arrays are taken as one, we can not chose an order for them + */ + usages = 0; + for (a = 0; a < report->maxfield; a++) { + field = report->field[a]; + + if (field->flags & HID_MAIN_ITEM_VARIABLE) { + for (u = 0; u < field->report_count; u++) { + __hid_insert_field_entry(hid, report, + &entries[usages], + field, u); + usages++; + } + } else { + __hid_insert_field_entry(hid, report, &entries[usages], + field, 0); + usages++; + } + } +} + +static void hid_process_ordering(struct hid_device *hid) +{ + struct hid_report *report; + struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT]; + + list_for_each_entry(report, &report_enum->report_list, list) + hid_report_process_ordering(hid, report); +} + /* * Output the field into the report. 
*/ @@ -2050,6 +2151,8 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) return -ENODEV; } + hid_process_ordering(hdev); + if ((hdev->claimed & HID_CLAIMED_INPUT) && (connect_mask & HID_CONNECT_FF) && hdev->ff_init) hdev->ff_init(hdev); diff --git a/include/linux/hid.h b/include/linux/hid.h index cf79eb3da465..f25020c0d6b8 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -477,6 +477,7 @@ struct hid_field { unsigned report_type; /* (input,output,feature) */ __s32 *value; /* last known value(s) */ __s32 *new_value; /* newly read value(s) */ + __s32 *usages_priorities; /* priority of each usage when reading the report */ __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; @@ -493,13 +494,22 @@ struct hid_field { #define HID_MAX_FIELDS 256 +struct hid_field_entry { + struct list_head list; + struct hid_field *field; + unsigned int index; + __s32 priority; +}; + struct hid_report { struct list_head list; struct list_head hidinput_list; + struct list_head field_entry_list; /* ordered list of input fields */ unsigned int id; /* id of this report */ unsigned int type; /* report type */ unsigned int application; /* application usage for this report */ struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */ + struct hid_field_entry *field_entries; /* allocated memory of input field_entry */ unsigned maxfield; /* maximum valid field index */ unsigned size; /* size of the report (bits) */ struct hid_device *device; /* associated device */ -- cgit v1.2.3-71-gd317 From 048cddfd440583a07530774fe20c7d26d7378155 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Thu, 3 Feb 2022 15:32:23 +0100 Subject: HID: input: enforce Invert usage to be processed before InRange When a device exposes both Invert and InRange, Invert must be processed before InRange. If we keep the order of the device and we process them out of order, InRange will first set BTN_TOOL_PEN, and then Invert will set BTN_TOOL_RUBBER. Userspace knows how to deal with that situation, but fixing it in the kernel is now easier. Signed-off-by: Benjamin Tissoires Reviewed-by: Ping Cheng Acked-by: Peter Hutterer Signed-off-by: Jiri Kosina --- drivers/hid/hid-input.c | 34 ++++++++++++++++++++++++++++++++-- include/linux/hid.h | 4 +++- 2 files changed, 35 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 8770d9a2b2af..61d91117f4ae 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -48,6 +48,25 @@ static const struct { __s32 y; } hid_hat_to_axis[] = {{ 0, 0}, { 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}}; +/* + * hid-input will convert this list into priorities: + * the first element will have the highest priority + * (the length of the following array) and the last + * element the lowest (1). + * + * hid-input will then shift the priority by 8 bits to leave some space + * in case drivers want to interleave other fields. + * + * If drivers want to add fields before those, hid-input will + * leave out the first 8 bits of the priority value. + * + * This still leaves us 65535 individual priority values. 
+ */ +static const __u32 hidinput_usages_priorities[] = { + HID_DG_INVERT, /* Invert must always come before In Range */ + HID_DG_INRANGE, +}; + #define map_abs(c) hid_map_usage(hidinput, usage, &bit, &max, EV_ABS, (c)) #define map_rel(c) hid_map_usage(hidinput, usage, &bit, &max, EV_REL, (c)) #define map_key(c) hid_map_usage(hidinput, usage, &bit, &max, EV_KEY, (c)) @@ -586,11 +605,12 @@ static bool hidinput_field_in_collection(struct hid_device *device, struct hid_f } static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field, - struct hid_usage *usage) + struct hid_usage *usage, unsigned int usage_index) { struct input_dev *input = hidinput->input; struct hid_device *device = input_get_drvdata(input); int max = 0, code; + unsigned int i = 0; unsigned long *bit = NULL; field->hidinput = hidinput; @@ -608,6 +628,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel goto ignore; } + /* assign a priority based on the static list declared here */ + for (i = 0; i < ARRAY_SIZE(hidinput_usages_priorities); i++) { + if (usage->hid == hidinput_usages_priorities[i]) { + field->usages_priorities[usage_index] = + (ARRAY_SIZE(hidinput_usages_priorities) - i) << 8; + break; + } + } + if (device->driver->input_mapping) { int ret = device->driver->input_mapping(device, hidinput, field, usage, &bit, &max); @@ -1962,7 +1991,8 @@ static inline void hidinput_configure_usages(struct hid_input *hidinput, for (i = 0; i < report->maxfield; i++) for (j = 0; j < report->field[i]->maxusage; j++) hidinput_configure_usage(hidinput, report->field[i], - report->field[i]->usage + j); + report->field[i]->usage + j, + j); } /* diff --git a/include/linux/hid.h b/include/linux/hid.h index f25020c0d6b8..eaad0655b05c 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -477,7 +477,9 @@ struct hid_field { unsigned report_type; /* (input,output,feature) */ __s32 *value; /* last known value(s) */ __s32 *new_value; /* newly read value(s) */ - __s32 *usages_priorities; /* priority of each usage when reading the report */ + __s32 *usages_priorities; /* priority of each usage when reading the report + * bits 8-16 are reserved for hid-input usage + */ __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; -- cgit v1.2.3-71-gd317 From 87562fcd134214a68e58d0714b820f2f2da75b1f Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Thu, 3 Feb 2022 15:32:24 +0100 Subject: HID: input: remove the need for HID_QUIRK_INVERT HID_QUIRK_INVERT is kind of complex to deal with and was bogus. Furthermore, it didn't make sense to use a global per struct hid_device quirk for something dynamic as the current state. Store the current tool information in the report itself, and re-order the processing of the fields to enforce having all the tablet "state" fields before getting to In Range and other input fields. This way, we now have all the information whether a tool is present or not while processing In Range. This new behavior enforces that only one tool gets forwarded to userspace at the same time, and that if either eraser or invert is set, we enforce BTN_TOOL_RUBBER. Note that the release of the previous tool now happens in its own EV_SYN report so userspace doesn't get confused by having 2 tools. 
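As an illustration, switching from pen to eraser now produces an event sequence along these lines (an assumed trace derived from the hid_report_release_tool()/hid_report_set_tool() helpers in the diff below; coordinates and pressure omitted):

    EV_KEY  BTN_TOUCH        0
    EV_KEY  BTN_TOOL_PEN     0
    EV_SYN  SYN_REPORT       0     <- previous tool released in its own report
    EV_KEY  BTN_TOOL_RUBBER  1
    ...                            <- axes etc. for the new tool
    EV_SYN  SYN_REPORT       0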
These changes are tested in the following hid-tools regression tests: https://gitlab.freedesktop.org/libevdev/hid-tools/-/merge_requests/127 Signed-off-by: Benjamin Tissoires Reviewed-by: Ping Cheng Acked-by: Peter Hutterer Signed-off-by: Jiri Kosina --- drivers/hid/hid-input.c | 98 +++++++++++++++++++++++++++++++++++++++++++++---- include/linux/hid.h | 6 ++- 2 files changed, 95 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 61d91117f4ae..9f8853640648 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -63,8 +63,11 @@ static const struct { * This still leaves us 65535 individual priority values. */ static const __u32 hidinput_usages_priorities[] = { + HID_DG_ERASER, /* Eraser (eraser touching) must always come before tipswitch */ HID_DG_INVERT, /* Invert must always come before In Range */ - HID_DG_INRANGE, + HID_DG_TIPSWITCH, /* Is the tip of the tool touching? */ + HID_DG_TIPPRESSURE, /* Tip Pressure might emulate tip switch */ + HID_DG_INRANGE, /* In Range needs to come after the other tool states */ }; #define map_abs(c) hid_map_usage(hidinput, usage, &bit, &max, EV_ABS, (c)) @@ -1365,9 +1368,38 @@ static void hidinput_handle_scroll(struct hid_usage *usage, input_event(input, EV_REL, usage->code, hi_res); } +static void hid_report_release_tool(struct hid_report *report, struct input_dev *input, + unsigned int tool) +{ + /* if the given tool is not currently reported, ignore */ + if (!test_bit(tool, input->key)) + return; + + /* + * if the given tool was previously set, release it, + * release any TOUCH and send an EV_SYN + */ + input_event(input, EV_KEY, BTN_TOUCH, 0); + input_event(input, EV_KEY, tool, 0); + input_event(input, EV_SYN, SYN_REPORT, 0); + + report->tool = 0; +} + +static void hid_report_set_tool(struct hid_report *report, struct input_dev *input, + unsigned int new_tool) +{ + if (report->tool != new_tool) + hid_report_release_tool(report, input, report->tool); + + input_event(input, EV_KEY, new_tool, 1); + report->tool = new_tool; +} + void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct input_dev *input; + struct hid_report *report = field->report; unsigned *quirks = &hid->quirks; if (!usage->type) @@ -1418,25 +1450,75 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct } switch (usage->hid) { + case HID_DG_ERASER: + report->tool_active |= !!value; + + /* + * if eraser is set, we must enforce BTN_TOOL_RUBBER + * to accommodate for devices not following the spec. + */ + if (value) + hid_report_set_tool(report, input, BTN_TOOL_RUBBER); + else if (report->tool != BTN_TOOL_RUBBER) + /* value is off, tool is not rubber, ignore */ + return; + + /* let hid-input set BTN_TOUCH */ + break; + case HID_DG_INVERT: - *quirks = value ? (*quirks | HID_QUIRK_INVERT) : (*quirks & ~HID_QUIRK_INVERT); + report->tool_active |= !!value; + + /* + * If invert is set, we store BTN_TOOL_RUBBER. + */ + if (value) + hid_report_set_tool(report, input, BTN_TOOL_RUBBER); + else if (!report->tool_active) + /* tool_active not set means Invert and Eraser are not set */ + hid_report_release_tool(report, input, BTN_TOOL_RUBBER); + + /* no further processing */ return; case HID_DG_INRANGE: - if (value) { - input_event(input, usage->type, (*quirks & HID_QUIRK_INVERT) ? 
BTN_TOOL_RUBBER : usage->code, 1); - return; + report->tool_active |= !!value; + + if (report->tool_active) { + /* + * if tool is not set but is marked as active, + * assume ours + */ + if (!report->tool) + hid_report_set_tool(report, input, usage->code); + } else { + hid_report_release_tool(report, input, usage->code); } - input_event(input, usage->type, usage->code, 0); - input_event(input, usage->type, BTN_TOOL_RUBBER, 0); + + /* reset tool_active for the next event */ + report->tool_active = false; + + /* no further processing */ return; + case HID_DG_TIPSWITCH: + report->tool_active |= !!value; + + /* if tool is set to RUBBER we should ignore the current value */ + if (report->tool == BTN_TOOL_RUBBER) + return; + + break; + case HID_DG_TIPPRESSURE: if (*quirks & HID_QUIRK_NOTOUCH) { int a = field->logical_minimum; int b = field->logical_maximum; - input_event(input, EV_KEY, BTN_TOUCH, value > a + ((b - a) >> 3)); + if (value > a + ((b - a) >> 3)) { + input_event(input, EV_KEY, BTN_TOUCH, 1); + report->tool_active = true; + } } break; diff --git a/include/linux/hid.h b/include/linux/hid.h index eaad0655b05c..feb8df61168f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -347,7 +347,7 @@ struct hid_item { */ #define MAX_USBHID_BOOT_QUIRKS 4 -#define HID_QUIRK_INVERT BIT(0) +/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */ #define HID_QUIRK_NOTOUCH BIT(1) #define HID_QUIRK_IGNORE BIT(2) #define HID_QUIRK_NOGET BIT(3) @@ -515,6 +515,10 @@ struct hid_report { unsigned maxfield; /* maximum valid field index */ unsigned size; /* size of the report (bits) */ struct hid_device *device; /* associated device */ + + /* tool related state */ + bool tool_active; /* whether the current tool is active */ + unsigned int tool; /* BTN_TOOL_* */ }; #define HID_MAX_IDS 256 -- cgit v1.2.3-71-gd317 From 5c20000a4756f57c824e3045c978ef19136a676d Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Thu, 3 Feb 2022 15:32:25 +0100 Subject: HID: input: accommodate priorities for slotted devices Multitouch devices in hybrid mode are reporting multiple times the same collection. We should accommodate for this in our handling of priorities by defining the slots they belong to. Signed-off-by: Benjamin Tissoires Reviewed-by: Ping Cheng Acked-by: Peter Hutterer Signed-off-by: Jiri Kosina --- drivers/hid/hid-input.c | 103 ++++++++++++++++++++++++++++++++++++++++++++---- include/linux/hid.h | 1 + 2 files changed, 96 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 9f8853640648..56d4e91c4750 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -48,6 +48,16 @@ static const struct { __s32 y; } hid_hat_to_axis[] = {{ 0, 0}, { 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}}; +struct usage_priority { + __u32 usage; /* the HID usage associated */ + bool global; /* we assume all usages to be slotted, + * unless global + */ + unsigned int slot_overwrite; /* for globals: allows to set the usage + * before or after the slots + */ +}; + /* * hid-input will convert this list into priorities: * the first element will have the highest priority @@ -57,17 +67,30 @@ static const struct { * hid-input will then shift the priority by 8 bits to leave some space * in case drivers want to interleave other fields. * + * To accommodate slotted devices, the slot priority is + * defined in the next 8 bits (defined by 0xff - slot). 
+ * * If drivers want to add fields before those, hid-input will * leave out the first 8 bits of the priority value. * * This still leaves us 65535 individual priority values. */ -static const __u32 hidinput_usages_priorities[] = { - HID_DG_ERASER, /* Eraser (eraser touching) must always come before tipswitch */ - HID_DG_INVERT, /* Invert must always come before In Range */ - HID_DG_TIPSWITCH, /* Is the tip of the tool touching? */ - HID_DG_TIPPRESSURE, /* Tip Pressure might emulate tip switch */ - HID_DG_INRANGE, /* In Range needs to come after the other tool states */ +static const struct usage_priority hidinput_usages_priorities[] = { + { /* Eraser (eraser touching) must always come before tipswitch */ + .usage = HID_DG_ERASER, + }, + { /* Invert must always come before In Range */ + .usage = HID_DG_INVERT, + }, + { /* Is the tip of the tool touching? */ + .usage = HID_DG_TIPSWITCH, + }, + { /* Tip Pressure might emulate tip switch */ + .usage = HID_DG_TIPPRESSURE, + }, + { /* In Range needs to come after the other tool states */ + .usage = HID_DG_INRANGE, + }, }; #define map_abs(c) hid_map_usage(hidinput, usage, &bit, &max, EV_ABS, (c)) @@ -612,6 +635,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel { struct input_dev *input = hidinput->input; struct hid_device *device = input_get_drvdata(input); + const struct usage_priority *usage_priority = NULL; int max = 0, code; unsigned int i = 0; unsigned long *bit = NULL; @@ -633,13 +657,26 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel /* assign a priority based on the static list declared here */ for (i = 0; i < ARRAY_SIZE(hidinput_usages_priorities); i++) { - if (usage->hid == hidinput_usages_priorities[i]) { + if (usage->hid == hidinput_usages_priorities[i].usage) { + usage_priority = &hidinput_usages_priorities[i]; + field->usages_priorities[usage_index] = (ARRAY_SIZE(hidinput_usages_priorities) - i) << 8; break; } } + /* + * For slotted devices, we need to also add the slot index + * in the priority. + */ + if (usage_priority && usage_priority->global) + field->usages_priorities[usage_index] |= + usage_priority->slot_overwrite; + else + field->usages_priorities[usage_index] |= + (0xff - field->slot_idx) << 16; + if (device->driver->input_mapping) { int ret = device->driver->input_mapping(device, hidinput, field, usage, &bit, &max); @@ -2068,7 +2105,57 @@ static struct hid_input *hidinput_match_application(struct hid_report *report) static inline void hidinput_configure_usages(struct hid_input *hidinput, struct hid_report *report) { - int i, j; + int i, j, k; + int first_field_index = 0; + int slot_collection_index = -1; + int prev_collection_index = -1; + unsigned int slot_idx = 0; + struct hid_field *field; + + /* + * First tag all the fields that are part of a slot, + * a slot needs to have one Contact ID in the collection + */ + for (i = 0; i < report->maxfield; i++) { + field = report->field[i]; + + /* ignore fields without usage */ + if (field->maxusage < 1) + continue; + + /* + * janitoring when collection_index changes + */ + if (prev_collection_index != field->usage->collection_index) { + prev_collection_index = field->usage->collection_index; + first_field_index = i; + } + + /* + * if we already found a Contact ID in the collection, + * tag and continue to the next. 
+ */ + if (slot_collection_index == field->usage->collection_index) { + field->slot_idx = slot_idx; + continue; + } + + /* check if the current field has Contact ID */ + for (j = 0; j < field->maxusage; j++) { + if (field->usage[j].hid == HID_DG_CONTACTID) { + slot_collection_index = field->usage->collection_index; + slot_idx++; + + /* + * mark all previous fields and this one in the + * current collection to be slotted. + */ + for (k = first_field_index; k <= i; k++) + report->field[k]->slot_idx = slot_idx; + break; + } + } + } for (i = 0; i < report->maxfield; i++) for (j = 0; j < report->field[i]->maxusage; j++) diff --git a/include/linux/hid.h b/include/linux/hid.h index feb8df61168f..4363a63b9775 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -492,6 +492,7 @@ struct hid_field { /* hidinput data */ struct hid_input *hidinput; /* associated input structure */ __u16 dpad; /* dpad input code */ + unsigned int slot_idx; /* slot index in a report */ }; #define HID_MAX_FIELDS 256 -- cgit v1.2.3-71-gd317 From dc6e0818bc9a0336d9accf3ea35d146d72aa7a18 Mon Sep 17 00:00:00 2001 From: Chengming Zhou Date: Sun, 20 Feb 2022 13:14:25 +0800 Subject: sched/cpuacct: Optimize away RCU read lock Since cpuacct_charge() is called from the scheduler's update_curr(), the rq lock must already be held, so the RCU read lock can be optimized away. Do the same thing in its wrapper cgroup_account_cputime(), though we can't use lockdep_assert_rq_held() there, since that is defined in kernel/sched/sched.h. Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Chengming Zhou Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20220220051426.5274-2-zhouchengming@bytedance.com --- include/linux/cgroup.h | 2 -- kernel/sched/cpuacct.c | 4 +--- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 75c151413fda..9a109c6ac0e0 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -791,11 +791,9 @@ static inline void cgroup_account_cputime(struct task_struct *task, cpuacct_charge(task, delta_exec); - rcu_read_lock(); cgrp = task_dfl_cgroup(task); if (cgroup_parent(cgrp)) __cgroup_account_cputime(cgrp, delta_exec); - rcu_read_unlock(); } static inline void cgroup_account_cputime_field(struct task_struct *task, diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 307800586ac8..f79f88456d72 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -337,12 +337,10 @@ void cpuacct_charge(struct task_struct *tsk, u64 cputime) unsigned int cpu = task_cpu(tsk); struct cpuacct *ca; - rcu_read_lock(); + lockdep_assert_rq_held(cpu_rq(cpu)); for (ca = task_ca(tsk); ca; ca = parent_ca(ca)) *per_cpu_ptr(ca->cpuusage, cpu) += cputime; - - rcu_read_unlock(); } /* -- cgit v1.2.3-71-gd317 From 3eba0505d03a9c1eb30d40c2330c0880b22d1b3a Mon Sep 17 00:00:00 2001 From: Chengming Zhou Date: Sun, 20 Feb 2022 13:14:26 +0800 Subject: sched/cpuacct: Remove redundant RCU read lock cpuacct_account_field() and its cgroup v2 wrapper cgroup_account_cputime_field() are only called from cputime code in task_group_account_field(), which is already in an RCU read-side critical section. So remove these redundant RCU read locks. 
Suggested-by: Tejun Heo Signed-off-by: Chengming Zhou Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20220220051426.5274-3-zhouchengming@bytedance.com --- include/linux/cgroup.h | 2 -- kernel/sched/cpuacct.c | 2 -- 2 files changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 9a109c6ac0e0..1e356c222756 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -804,11 +804,9 @@ static inline void cgroup_account_cputime_field(struct task_struct *task, cpuacct_account_field(task, index, delta_exec); - rcu_read_lock(); cgrp = task_dfl_cgroup(task); if (cgroup_parent(cgrp)) __cgroup_account_cputime_field(cgrp, index, delta_exec); - rcu_read_unlock(); } #else /* CONFIG_CGROUPS */ diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index f79f88456d72..d269ede84e85 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -352,10 +352,8 @@ void cpuacct_account_field(struct task_struct *tsk, int index, u64 val) { struct cpuacct *ca; - rcu_read_lock(); for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca)) __this_cpu_add(ca->cpustat->cpustat[index], val); - rcu_read_unlock(); } struct cgroup_subsys cpuacct_cgrp_subsys = { -- cgit v1.2.3-71-gd317 From fa2c3254d7cfff5f7a916ab928a562d1165f17bb Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Thu, 20 Jan 2022 16:25:19 +0000 Subject: sched/tracing: Don't re-read p->state when emitting sched_switch event As of commit c6e7bd7afaeb ("sched/core: Optimize ttwu() spinning on p->on_cpu") the following sequence becomes possible: p->__state = TASK_INTERRUPTIBLE; __schedule() deactivate_task(p); ttwu() READ !p->on_rq p->__state=TASK_WAKING trace_sched_switch() __trace_sched_switch_state() task_state_index() return 0; TASK_WAKING isn't in TASK_REPORT, so the task appears as TASK_RUNNING in the trace event. Prevent this by pushing the value read from __schedule() down the trace event. 
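To see why the stale read shows up as TASK_RUNNING, here is a userspace sketch of the masking performed by task_state_index() (state constants mirror include/linux/sched.h of this era; the open-coded loop stands in for fls(); illustrative only):

    #include <stdio.h>

    #define TASK_INTERRUPTIBLE 0x0001
    #define TASK_WAKING        0x0200
    #define TASK_REPORT        0x007f  /* TASK_WAKING is not part of it */

    static unsigned int state_index(unsigned int state)
    {
        unsigned int s = state & TASK_REPORT;
        unsigned int idx = 0;

        while (s) {        /* fls(): index of the highest set bit */
            idx++;
            s >>= 1;
        }
        return idx;        /* 0 means TASK_RUNNING, i.e. "R" */
    }

    int main(void)
    {
        printf("%u\n", state_index(TASK_INTERRUPTIBLE)); /* 1 -> "S" */
        printf("%u\n", state_index(TASK_WAKING));        /* 0 -> traced as running */
        return 0;
    }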
Reported-by: Abhijeet Dharmapurikar Signed-off-by: Valentin Schneider Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Steven Rostedt (Google) Link: https://lore.kernel.org/r/20220120162520.570782-2-valentin.schneider@arm.com --- include/linux/sched.h | 11 ++++++++--- include/trace/events/sched.h | 11 +++++++---- kernel/sched/core.c | 4 ++-- kernel/trace/fgraph.c | 4 +++- kernel/trace/ftrace.c | 4 +++- kernel/trace/trace_events.c | 8 ++++++-- kernel/trace/trace_osnoise.c | 4 +++- kernel/trace/trace_sched_switch.c | 1 + kernel/trace/trace_sched_wakeup.c | 1 + 9 files changed, 34 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index f00132e7ef6e..457c8a058b77 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1620,10 +1620,10 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) #define TASK_REPORT_IDLE (TASK_REPORT + 1) #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) -static inline unsigned int task_state_index(struct task_struct *tsk) +static inline unsigned int __task_state_index(unsigned int tsk_state, + unsigned int tsk_exit_state) { - unsigned int tsk_state = READ_ONCE(tsk->__state); - unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; + unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT; BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); @@ -1633,6 +1633,11 @@ static inline unsigned int task_state_index(struct task_struct *tsk) return fls(state); } +static inline unsigned int task_state_index(struct task_struct *tsk) +{ + return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); +} + static inline char task_index_to_char(unsigned int state) { static const char state_char[] = "RSDTtXZPI"; diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 94640482cfe7..65e786756321 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -187,7 +187,9 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, TP_ARGS(p)); #ifdef CREATE_TRACE_POINTS -static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) +static inline long __trace_sched_switch_state(bool preempt, + unsigned int prev_state, + struct task_struct *p) { unsigned int state; @@ -208,7 +210,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * * it for left shift operation to get the correct task->state * mapping. */ - state = task_state_index(p); + state = __task_state_index(prev_state, p->exit_state); return state ? 
(1 << (state - 1)) : state; } @@ -220,10 +222,11 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * TRACE_EVENT(sched_switch, TP_PROTO(bool preempt, + unsigned int prev_state, struct task_struct *prev, struct task_struct *next), - TP_ARGS(preempt, prev, next), + TP_ARGS(preempt, prev_state, prev, next), TP_STRUCT__entry( __array( char, prev_comm, TASK_COMM_LEN ) @@ -239,7 +242,7 @@ TRACE_EVENT(sched_switch, memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); __entry->prev_pid = prev->pid; __entry->prev_prio = prev->prio; - __entry->prev_state = __trace_sched_switch_state(preempt, prev); + __entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev); memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); __entry->next_pid = next->pid; __entry->next_prio = next->prio; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ef946123e9af..3aafc15da24a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4836,7 +4836,7 @@ static struct rq *finish_task_switch(struct task_struct *prev) { struct rq *rq = this_rq(); struct mm_struct *mm = rq->prev_mm; - long prev_state; + unsigned int prev_state; /* * The previous task will have left us with a preempt_count of 2 @@ -6300,7 +6300,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) migrate_disable_switch(rq, prev); psi_sched_switch(prev, next, !task_on_rq_queued(prev)); - trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next); + trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev_state, prev, next); /* Also unlocks the rq: */ rq = context_switch(rq, prev, next, &rf); diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index 22061d38fc00..19028e072cdb 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -415,7 +415,9 @@ free: static void ftrace_graph_probe_sched_switch(void *ignore, bool preempt, - struct task_struct *prev, struct task_struct *next) + unsigned int prev_state, + struct task_struct *prev, + struct task_struct *next) { unsigned long long timestamp; int index; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f9feb197b2da..6762ae029fdd 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -7347,7 +7347,9 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) static void ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, - struct task_struct *prev, struct task_struct *next) + unsigned int prev_state, + struct task_struct *prev, + struct task_struct *next) { struct trace_array *tr = data; struct trace_pid_list *pid_list; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 3147614c1812..2a19ea747ff4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -759,7 +759,9 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable) static void event_filter_pid_sched_switch_probe_pre(void *data, bool preempt, - struct task_struct *prev, struct task_struct *next) + unsigned int prev_state, + struct task_struct *prev, + struct task_struct *next) { struct trace_array *tr = data; struct trace_pid_list *no_pid_list; @@ -783,7 +785,9 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt, static void event_filter_pid_sched_switch_probe_post(void *data, bool preempt, - struct task_struct *prev, struct task_struct *next) + unsigned int prev_state, + struct task_struct *prev, + struct task_struct *next) { struct trace_array *tr = data; struct trace_pid_list *no_pid_list; diff --git a/kernel/trace/trace_osnoise.c 
b/kernel/trace/trace_osnoise.c index 870a08da5b48..1829b4cb8cc1 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -1167,7 +1167,9 @@ thread_exit(struct osnoise_variables *osn_var, struct task_struct *t) * used to record the beginning and to report the end of a thread noise window. */ static void -trace_sched_switch_callback(void *data, bool preempt, struct task_struct *p, +trace_sched_switch_callback(void *data, bool preempt, + unsigned int prev_state, + struct task_struct *p, struct task_struct *n) { struct osnoise_variables *osn_var = this_cpu_osn_var(); diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index e304196d7c28..993b0ed10d8c 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -22,6 +22,7 @@ static DEFINE_MUTEX(sched_register_mutex); static void probe_sched_switch(void *ignore, bool preempt, + unsigned int prev_state, struct task_struct *prev, struct task_struct *next) { int flags; diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 2402de520eca..46429f9a96fa 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -426,6 +426,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, static void notrace probe_wakeup_sched_switch(void *ignore, bool preempt, + unsigned int prev_state, struct task_struct *prev, struct task_struct *next) { struct trace_array_cpu *data; -- cgit v1.2.3-71-gd317 From 25795ef6299f07ce3838f3253a9cb34f64efcfae Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Thu, 20 Jan 2022 16:25:20 +0000 Subject: sched/tracing: Report TASK_RTLOCK_WAIT tasks as TASK_UNINTERRUPTIBLE TASK_RTLOCK_WAIT currently isn't part of TASK_REPORT, thus a task blocking on an rtlock will appear as having a task state == 0, IOW TASK_RUNNING. The actual state is saved in p->saved_state, but reading it after reading p->__state has a few issues:

  o that could still be TASK_RUNNING in the case of e.g. rt_spin_lock
  o ttwu_state_match() might have changed that to TASK_RUNNING

As pointed out by Eric, adding TASK_RTLOCK_WAIT to TASK_REPORT implies exposing a new state to userspace tools which may not know what to do with it. The only information that needs to be conveyed here is that a task is waiting on an rt_mutex, which matches TASK_UNINTERRUPTIBLE - there's no need for a new state. Reported-by: Uwe Kleine-König Signed-off-by: Valentin Schneider Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Steven Rostedt (Google) Link: https://lore.kernel.org/r/20220120162520.570782-3-valentin.schneider@arm.com --- include/linux/sched.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 457c8a058b77..78b606c9ab38 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1630,6 +1630,14 @@ static inline unsigned int __task_state_index(unsigned int tsk_state, if (tsk_state == TASK_IDLE) state = TASK_REPORT_IDLE; + /* + * We're lying here, but rather than expose a completely new task state + * to userspace, we can make this appear as if the task has gone through + * a regular rt_mutex_lock() call.
+ */ + if (tsk_state == TASK_RTLOCK_WAIT) + state = TASK_UNINTERRUPTIBLE; + return fls(state); } -- cgit v1.2.3-71-gd317 From 6ddf5f165f13ab623d04aee2a473d35818255199 Mon Sep 17 00:00:00 2001 From: Milind Changire Date: Mon, 14 Feb 2022 05:01:01 +0000 Subject: ceph: add getvxattr op Problem: Some directory vxattrs (e.g. ceph.dir.pin.random) are governed by information that isn't necessarily shared with the client. Solution: Add support for the new GETVXATTR operation, which allows the client to query the MDS directly for vxattrs: when the client is asked for a vxattr that doesn't have a special handler, it now issues a GETVXATTR to the MDS. The new getvxattr op fetches the ceph.dir.pin*, ceph.dir.layout* and ceph.file.layout* vxattrs. When the entire layout for a dir or a file is set in one go, the layout is expected to be in standard JSON format; retrieval of an individual field value is not wrapped in JSON. As a temporary measure, setting a vxattr can also be done in the old format, which will be deprecated in the future. URL: https://tracker.ceph.com/issues/51062 Signed-off-by: Milind Changire Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov --- fs/ceph/inode.c | 51 ++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/mds_client.c | 24 +++++++++++++++++++++ fs/ceph/mds_client.h | 6 ++++++ fs/ceph/strings.c | 1 + fs/ceph/super.h | 1 + fs/ceph/xattr.c | 13 +++++++++-- include/linux/ceph/ceph_fs.h | 1 + 7 files changed, 95 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 22adddb3d051..7b1e93c8a0d2 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -2301,6 +2301,57 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page, return err; } +int ceph_do_getvxattr(struct inode *inode, const char *name, void *value, + size_t size) +{ + struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); + struct ceph_mds_client *mdsc = fsc->mdsc; + struct ceph_mds_request *req; + int mode = USE_AUTH_MDS; + int err; + char *xattr_value; + size_t xattr_value_len; + + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode); + if (IS_ERR(req)) { + err = -ENOMEM; + goto out; + } + + req->r_path2 = kstrdup(name, GFP_NOFS); + if (!req->r_path2) { + err = -ENOMEM; + goto put; + } + + ihold(inode); + req->r_inode = inode; + err = ceph_mdsc_do_request(mdsc, NULL, req); + if (err < 0) + goto put; + + xattr_value = req->r_reply_info.xattr_info.xattr_value; + xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len; + + dout("do_getvxattr xattr_value_len:%zu, size:%zu\n", xattr_value_len, size); + + err = (int)xattr_value_len; + if (size == 0) + goto put; + + if (xattr_value_len > size) { + err = -ERANGE; + goto put; + } + + memcpy(value, xattr_value, xattr_value_len); +put: + ceph_mdsc_put_request(req); +out: + dout("do_getvxattr result=%d\n", err); + return err; +} + /* * Check inode permissions.
We verify we have a valid value for diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index f3abbf83ea5b..0b7bde73bf03 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -555,6 +555,28 @@ bad: return -EIO; } +static int parse_reply_info_getvxattr(void **p, void *end, + struct ceph_mds_reply_info_parsed *info, + u64 features) +{ + u32 value_len; + + ceph_decode_skip_8(p, end, bad); /* skip current version: 1 */ + ceph_decode_skip_8(p, end, bad); /* skip first version: 1 */ + ceph_decode_skip_32(p, end, bad); /* skip payload length */ + + ceph_decode_32_safe(p, end, value_len, bad); + + if (value_len == end - *p) { + info->xattr_info.xattr_value = *p; + info->xattr_info.xattr_value_len = value_len; + *p = end; + return value_len; + } +bad: + return -EIO; +} + /* * parse extra results */ @@ -570,6 +592,8 @@ static int parse_reply_info_extra(void **p, void *end, return parse_reply_info_readdir(p, end, info, features); else if (op == CEPH_MDS_OP_CREATE) return parse_reply_info_create(p, end, info, features, s); + else if (op == CEPH_MDS_OP_GETVXATTR) + return parse_reply_info_getvxattr(p, end, info, features); else return -EIO; } diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index ab12f3ce81a3..33497846e47e 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -100,6 +100,11 @@ struct ceph_mds_reply_dir_entry { loff_t offset; }; +struct ceph_mds_reply_xattr { + char *xattr_value; + size_t xattr_value_len; +}; + /* * parsed info about an mds reply, including information about * either: 1) the target inode and/or its parent directory and dentry, @@ -115,6 +120,7 @@ struct ceph_mds_reply_info_parsed { char *dname; u32 dname_len; struct ceph_mds_reply_lease *dlease; + struct ceph_mds_reply_xattr xattr_info; /* extra */ union { diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c index 573bb9556fb5..e36e8948e728 100644 --- a/fs/ceph/strings.c +++ b/fs/ceph/strings.c @@ -60,6 +60,7 @@ const char *ceph_mds_op_name(int op) case CEPH_MDS_OP_LOOKUPINO: return "lookupino"; case CEPH_MDS_OP_LOOKUPNAME: return "lookupname"; case CEPH_MDS_OP_GETATTR: return "getattr"; + case CEPH_MDS_OP_GETVXATTR: return "getvxattr"; case CEPH_MDS_OP_SETXATTR: return "setxattr"; case CEPH_MDS_OP_SETATTR: return "setattr"; case CEPH_MDS_OP_RMXATTR: return "rmxattr"; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index a35c27489901..4569f802ddbb 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -1048,6 +1048,7 @@ static inline bool ceph_inode_is_shutdown(struct inode *inode) /* xattr.c */ int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int); +int ceph_do_getvxattr(struct inode *inode, const char *name, void *value, size_t size); ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t); extern ssize_t ceph_listxattr(struct dentry *, char *, size_t); extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci); diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index fcf7dfdecf96..afec84088471 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -923,10 +923,13 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value, { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_inode_xattr *xattr; - struct ceph_vxattr *vxattr = NULL; + struct ceph_vxattr *vxattr; int req_mask; ssize_t err; + if (strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) + goto handle_non_vxattrs; + /* let's see if a virtual xattr was requested */ vxattr = ceph_match_vxattr(inode, name); if (vxattr) { @@ -945,8 +948,14 @@ ssize_t 
__ceph_getxattr(struct inode *inode, const char *name, void *value, err = -ERANGE; } return err; + } else { + err = ceph_do_getvxattr(inode, name, value, size); + /* this would happen with a new client and old server combo */ + if (err == -EOPNOTSUPP) + err = -ENODATA; + return err; } - +handle_non_vxattrs: req_mask = __get_request_mask(inode); spin_lock(&ci->i_ceph_lock); diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 7ad6c3d0db7d..66db21ac5f0c 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -328,6 +328,7 @@ enum { CEPH_MDS_OP_LOOKUPPARENT = 0x00103, CEPH_MDS_OP_LOOKUPINO = 0x00104, CEPH_MDS_OP_LOOKUPNAME = 0x00105, + CEPH_MDS_OP_GETVXATTR = 0x00106, CEPH_MDS_OP_SETXATTR = 0x01105, CEPH_MDS_OP_RMXATTR = 0x01106, -- cgit v1.2.3-71-gd317 From ab58a5a1c0487b67f7409f39d3c8593d416d4e7f Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Tue, 15 Feb 2022 20:23:14 +0800 Subject: ceph: move to a dedicated slabcache for ceph_cap_snap There could be a huge number of capsnaps around at any given time. On x86_64 the structure is 248 bytes, which will be rounded up to 256 bytes by kzalloc. Move this to a dedicated slabcache to save 8 bytes for each. [ jlayton: use kmem_cache_zalloc ] Signed-off-by: Xiubo Li Signed-off-by: Jeff Layton Signed-off-by: Ilya Dryomov --- fs/ceph/snap.c | 5 +++-- fs/ceph/super.c | 7 +++++++ fs/ceph/super.h | 2 +- include/linux/ceph/libceph.h | 1 + 4 files changed, 12 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index b41e6724c591..bc5ec72d958c 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -482,7 +482,7 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci) struct ceph_buffer *old_blob = NULL; int used, dirty; - capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS); + capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS); if (!capsnap) { pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode); return; } @@ -603,7 +603,8 @@ update_snapc: spin_unlock(&ci->i_ceph_lock); ceph_buffer_put(old_blob); - kfree(capsnap); + if (capsnap) + kmem_cache_free(ceph_cap_snap_cachep, capsnap); ceph_put_snap_context(old_snapc); } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index bf79f369aec6..978463fa822c 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -864,6 +864,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc) */ struct kmem_cache *ceph_inode_cachep; struct kmem_cache *ceph_cap_cachep; +struct kmem_cache *ceph_cap_snap_cachep; struct kmem_cache *ceph_cap_flush_cachep; struct kmem_cache *ceph_dentry_cachep; struct kmem_cache *ceph_file_cachep; @@ -892,6 +893,9 @@ static int __init init_caches(void) ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD); if (!ceph_cap_cachep) goto bad_cap; + ceph_cap_snap_cachep = KMEM_CACHE(ceph_cap_snap, SLAB_MEM_SPREAD); + if (!ceph_cap_snap_cachep) + goto bad_cap_snap; ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); if (!ceph_cap_flush_cachep) @@ -931,6 +935,8 @@ bad_file: bad_dentry: kmem_cache_destroy(ceph_cap_flush_cachep); bad_cap_flush: + kmem_cache_destroy(ceph_cap_snap_cachep); +bad_cap_snap: kmem_cache_destroy(ceph_cap_cachep); bad_cap: kmem_cache_destroy(ceph_inode_cachep); @@ -947,6 +953,7 @@ static void destroy_caches(void) kmem_cache_destroy(ceph_inode_cachep); kmem_cache_destroy(ceph_cap_cachep); + kmem_cache_destroy(ceph_cap_snap_cachep); kmem_cache_destroy(ceph_cap_flush_cachep); kmem_cache_destroy(ceph_dentry_cachep);
kmem_cache_destroy(ceph_file_cachep); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 4569f802ddbb..a2caa7beca4f 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -230,7 +230,7 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap) if (refcount_dec_and_test(&capsnap->nref)) { if (capsnap->xattr_blob) ceph_buffer_put(capsnap->xattr_blob); - kfree(capsnap); + kmem_cache_free(ceph_cap_snap_cachep, capsnap); } } diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index edf62eaa6285..00af2c98da75 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -284,6 +284,7 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) extern struct kmem_cache *ceph_inode_cachep; extern struct kmem_cache *ceph_cap_cachep; +extern struct kmem_cache *ceph_cap_snap_cachep; extern struct kmem_cache *ceph_cap_flush_cachep; extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; -- cgit v1.2.3-71-gd317 From 1753629ea0f34900467185b7d8b0db11a64f4728 Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Wed, 23 Feb 2022 09:04:55 +0800 Subject: ceph: remove incorrect and unused CEPH_INO_DOTDOT macro Ceph has removed this macro, and the value 0x3 will be used for the global dummy snaprealm. Signed-off-by: Xiubo Li Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov --- include/linux/ceph/ceph_fs.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 66db21ac5f0c..f14f9bc290e6 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -29,7 +29,6 @@ #define CEPH_INO_ROOT 1 #define CEPH_INO_CEPH 2 /* hidden .ceph dir */ -#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */ /* arbitrary limit on max # of monitors (cluster of 3 is typical) */ #define CEPH_MAX_MON 31 -- cgit v1.2.3-71-gd317 From 5ed91587e201c77b35a5555c8c082655bb5834fe Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Wed, 23 Feb 2022 09:04:56 +0800 Subject: ceph: do not release the global snaprealm until unmounting The global snaprealm would be created and then destroyed immediately every time it was updated.
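For illustration, a simplified stand-alone C sketch of the fix: the long-lived (global) object is created holding one extra reference, so routine update cycles that take and drop a reference never free it, and only the final put at unmount destroys it. The names here are hypothetical, not the ceph code:

    #include <stdio.h>
    #include <stdlib.h>

    struct realm {
        int nref;
    };

    /* A pinned (global) realm starts with one extra reference that is
     * only dropped at teardown, mirroring the nref = 2 in the patch. */
    static struct realm *realm_create(int pinned)
    {
        struct realm *r = calloc(1, sizeof(*r));
        if (r)
            r->nref = pinned ? 2 : 1;
        return r;
    }

    static void realm_put(struct realm *r)
    {
        if (--r->nref == 0) {
            printf("realm destroyed\n");
            free(r);
        } else {
            printf("realm kept, nref=%d\n", r->nref);
        }
    }

    int main(void)
    {
        struct realm *global = realm_create(1);
        if (!global)
            return 1;
        realm_put(global); /* an update cycle: the realm survives */
        realm_put(global); /* unmount: last reference, destroyed */
        return 0;
    }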
URL: https://tracker.ceph.com/issues/54362 Signed-off-by: Xiubo Li Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov --- fs/ceph/mds_client.c | 2 +- fs/ceph/snap.c | 13 +++++++++++-- fs/ceph/super.h | 2 +- include/linux/ceph/ceph_fs.h | 3 ++- 4 files changed, 15 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index ef9145477aae..fa38c013126d 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -4838,7 +4838,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) mutex_unlock(&mdsc->mutex); ceph_cleanup_snapid_map(mdsc); - ceph_cleanup_empty_realms(mdsc); + ceph_cleanup_global_and_empty_realms(mdsc); cancel_work_sync(&mdsc->cap_reclaim_work); cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 66a1a92cf579..cc9097c27052 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -121,7 +121,11 @@ static struct ceph_snap_realm *ceph_create_snap_realm( if (!realm) return ERR_PTR(-ENOMEM); - atomic_set(&realm->nref, 1); /* for caller */ + /* Do not release the global dummy snaprealm until unmounting */ + if (ino == CEPH_INO_GLOBAL_SNAPREALM) + atomic_set(&realm->nref, 2); + else + atomic_set(&realm->nref, 1); realm->ino = ino; INIT_LIST_HEAD(&realm->children); INIT_LIST_HEAD(&realm->child_item); @@ -261,9 +265,14 @@ static void __cleanup_empty_realms(struct ceph_mds_client *mdsc) spin_unlock(&mdsc->snap_empty_lock); } -void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc) +void ceph_cleanup_global_and_empty_realms(struct ceph_mds_client *mdsc) { + struct ceph_snap_realm *global_realm; + down_write(&mdsc->snap_rwsem); + global_realm = __lookup_snap_realm(mdsc, CEPH_INO_GLOBAL_SNAPREALM); + if (global_realm) + ceph_put_snap_realm(mdsc, global_realm); __cleanup_empty_realms(mdsc); up_write(&mdsc->snap_rwsem); } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index ef9f32ec905e..0b4b519682f1 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -940,7 +940,7 @@ extern void ceph_handle_snap(struct ceph_mds_client *mdsc, struct ceph_msg *msg); extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, struct ceph_cap_snap *capsnap); -extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); +extern void ceph_cleanup_global_and_empty_realms(struct ceph_mds_client *mdsc); extern struct ceph_snapid_map *ceph_get_snapid_map(struct ceph_mds_client *mdsc, u64 snap); diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index f14f9bc290e6..86bf82dbd8b8 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -28,7 +28,8 @@ #define CEPH_INO_ROOT 1 -#define CEPH_INO_CEPH 2 /* hidden .ceph dir */ +#define CEPH_INO_CEPH 2 /* hidden .ceph dir */ +#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */ /* arbitrary limit on max # of monitors (cluster of 3 is typical) */ #define CEPH_MAX_MON 31 -- cgit v1.2.3-71-gd317 From 2cbfae0f50f7a0f8fc9d552bd856f6cd7b7608b6 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 24 Feb 2022 01:56:20 +0200 Subject: ACPI: platform: Constify properties parameter in acpi_create_platform_device() Properties are not and should not be changed in the callee, hence constify the properties parameter in acpi_create_platform_device(). Signed-off-by: Andy Shevchenko Signed-off-by: Rafael J.
Wysocki --- drivers/acpi/acpi_platform.c | 2 +- include/linux/acpi.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 78d621290a35..de3cbf152dee 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -95,7 +95,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev, * Name of the platform device will be the same as @adev's. */ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, - struct property_entry *properties) + const struct property_entry *properties) { struct platform_device *pdev = NULL; struct platform_device_info pdevinfo; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6274758648e3..9ac545379447 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -691,7 +691,7 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); struct platform_device *acpi_create_platform_device(struct acpi_device *, - struct property_entry *); + const struct property_entry *); #define ACPI_PTR(_ptr) (_ptr) static inline void acpi_device_set_enumerated(struct acpi_device *adev) @@ -930,7 +930,7 @@ static inline int acpi_device_modalias(struct device *dev, static inline struct platform_device * acpi_create_platform_device(struct acpi_device *adev, - struct property_entry *properties) + const struct property_entry *properties) { return NULL; } -- cgit v1.2.3-71-gd317 From d65bc29be0ae4ca2368df25dc6f6247aefb57f07 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Sun, 13 Feb 2022 22:25:20 +0300 Subject: binfmt: move more stuff under CONFIG_COREDUMP struct linux_binfmt::core_dump and struct linux_binfmt::min_coredump are used under CONFIG_COREDUMP only. Shrink those embedded configs a bit.
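As a rough stand-alone sketch of the pattern (CONFIG_COREDUMP is faked with a plain #define here, an assumption for the demo; toggle it to see the struct shrink), both the members and every designated initializer that names them must sit under the same guard, or builds without the option break:

    #include <stdio.h>

    #define CONFIG_COREDUMP 1 /* comment out to build the shrunken variant */

    struct binfmt_like {
        int (*load_binary)(void);
    #ifdef CONFIG_COREDUMP
        int (*core_dump)(void);
        unsigned long min_coredump; /* minimal dump size */
    #endif
    };

    static int load(void) { return 0; }
    #ifdef CONFIG_COREDUMP
    static int dump(void) { return 0; }
    #endif

    /* Initializers guarded the same way as the members. */
    static struct binfmt_like fmt = {
        .load_binary = load,
    #ifdef CONFIG_COREDUMP
        .core_dump = dump,
        .min_coredump = 4096,
    #endif
    };

    int main(void)
    {
        printf("sizeof(fmt) = %zu\n", sizeof(fmt));
        return 0;
    }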
Signed-off-by: Alexey Dobriyan Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/YglbIFyN+OtwVyjW@localhost.localdomain --- fs/binfmt_elf.c | 2 ++ fs/binfmt_elf_fdpic.c | 2 +- fs/binfmt_flat.c | 2 ++ include/linux/binfmts.h | 2 ++ 4 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 65ea5381c5c7..d0c1703b9576 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -101,8 +101,10 @@ static struct linux_binfmt elf_format = { .module = THIS_MODULE, .load_binary = load_elf_binary, .load_shlib = load_elf_library, +#ifdef CONFIG_COREDUMP .core_dump = elf_core_dump, .min_coredump = ELF_EXEC_PAGESIZE, +#endif }; #define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE)) diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index c6f588dc4a9d..7fa6e6632d9d 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -83,8 +83,8 @@ static struct linux_binfmt elf_fdpic_format = { .load_binary = load_elf_fdpic_binary, #ifdef CONFIG_ELF_CORE .core_dump = elf_fdpic_core_dump, -#endif .min_coredump = ELF_EXEC_PAGESIZE, +#endif }; static int __init init_elf_fdpic_binfmt(void) diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 5d776f80ee50..5f0bf24bb3b8 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -102,8 +102,10 @@ static int flat_core_dump(struct coredump_params *cprm); static struct linux_binfmt flat_format = { .module = THIS_MODULE, .load_binary = load_flat_binary, +#ifdef CONFIG_COREDUMP .core_dump = flat_core_dump, .min_coredump = PAGE_SIZE +#endif }; /****************************************************************************/ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 049cf9421d83..5d651c219c99 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -98,8 +98,10 @@ struct linux_binfmt { struct module *module; int (*load_binary)(struct linux_binprm *); int (*load_shlib)(struct file *); +#ifdef CONFIG_COREDUMP int (*core_dump)(struct coredump_params *cprm); unsigned long min_coredump; /* minimal dump size */ +#endif } __randomize_layout; extern void __register_binfmt(struct linux_binfmt *fmt, int insert); -- cgit v1.2.3-71-gd317 From 26440303310591e29121964ede0048583cb3126d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 24 Feb 2022 18:55:52 +0100 Subject: scsi: core: Remove <scsi/scsi_request.h> This header is empty now except for an include of <scsi/scsi_cmnd.h>, so remove it. Link: https://lore.kernel.org/r/20220224175552.988286-9-hch@lst.de Reviewed-by: Bart Van Assche Reviewed-by: John Garry Signed-off-by: Christoph Hellwig Signed-off-by: Martin K.
Petersen --- drivers/cdrom/cdrom.c | 1 - drivers/scsi/scsi_transport_sas.c | 1 - include/linux/bsg-lib.h | 1 - include/scsi/scsi_cmnd.h | 1 - include/scsi/scsi_request.h | 7 ------- 5 files changed, 11 deletions(-) delete mode 100644 include/scsi/scsi_request.h (limited to 'include/linux') diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 1b57d4666e43..7bd10d63ddbe 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -284,7 +284,6 @@ #include #include #include -#include /* used to tell the module to turn on full debugging messages */ static bool debug; diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 4ee578b181da..12bff64dade6 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -34,7 +34,6 @@ #include #include -#include #include #include #include diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 6b211323a489..9e97ced2896d 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -10,7 +10,6 @@ #define _BLK_BSG_ #include -#include struct bsg_job; struct request; diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 4b33ca6a7c7d..76c5eaeeb3b5 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -10,7 +10,6 @@ #include #include #include -#include struct Scsi_Host; struct scsi_driver; diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h deleted file mode 100644 index 6d424d3e8d02..000000000000 --- a/include/scsi/scsi_request.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _SCSI_SCSI_REQUEST_H -#define _SCSI_SCSI_REQUEST_H - -#include - -#endif /* _SCSI_SCSI_REQUEST_H */ -- cgit v1.2.3-71-gd317 From 728dd0ab37421396927749fc8cec9c2009c526c8 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 22 Feb 2022 08:59:33 -0500 Subject: NFS: Don't re-read the entire page cache to find the next cookie If the page cache entry that was last read gets invalidated for some reason, then make sure we can re-create it on the next call to readdir. This, combined with the cache page validation, allows us to reuse the cached value of page-index on successive calls to nfs_readdir. Credit is due to Benjamin Coddington for showing that the concept works, and that it allows for improved cache sharing between processes even in the case where pages are lost due to LRU or active invalidation. 
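To illustrate the idea with hypothetical helper names (not the NFS code): the directory context remembers the last cookie it handed out, so when a cached page has been evicted the reader can refill the cache starting from that cookie instead of re-reading the directory from the beginning:

    #include <stdio.h>

    /* Pretend page cache lookup; -1 means the page was evicted. */
    static int cache_lookup(unsigned long long cookie, int evicted)
    {
        return evicted ? -1 : (int)cookie;
    }

    /* Re-create the cache entry starting at the remembered cookie. */
    static int refill_from(unsigned long long last_cookie)
    {
        printf("refilling cache from cookie %llu\n", last_cookie);
        return (int)last_cookie;
    }

    int main(void)
    {
        unsigned long long last_cookie = 42; /* saved in the dir context */
        int idx = cache_lookup(last_cookie, 1 /* page was evicted */);
        if (idx < 0)
            idx = refill_from(last_cookie); /* no full re-read */
        printf("resumed at entry %d\n", idx);
        return 0;
    }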
Suggested-by: Benjamin Coddington Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 10 +++++++--- include/linux/nfs_fs.h | 1 + 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a1767f755460..93f70698e401 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1120,6 +1120,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->dup_cookie = dir_ctx->dup_cookie; desc->duped = dir_ctx->duped; page_index = dir_ctx->page_index; + desc->page_index = page_index; + desc->last_cookie = dir_ctx->last_cookie; desc->attr_gencount = dir_ctx->attr_gencount; desc->eof = dir_ctx->eof; memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); @@ -1168,6 +1170,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) spin_lock(&file->f_lock); dir_ctx->dir_cookie = desc->dir_cookie; dir_ctx->dup_cookie = desc->dup_cookie; + dir_ctx->last_cookie = desc->last_cookie; dir_ctx->duped = desc->duped; dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->page_index; @@ -1209,10 +1212,11 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence) } if (offset != filp->f_pos) { filp->f_pos = offset; - if (nfs_readdir_use_cookie(filp)) - dir_ctx->dir_cookie = offset; - else + if (!nfs_readdir_use_cookie(filp)) { dir_ctx->dir_cookie = 0; + dir_ctx->page_index = 0; + } else + dir_ctx->dir_cookie = offset; if (offset == 0) memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf)); dir_ctx->duped = 0; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 6e10725887d1..1c533f2c1f36 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -105,6 +105,7 @@ struct nfs_open_dir_context { __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; __u64 dup_cookie; + __u64 last_cookie; pgoff_t page_index; signed char duped; bool eof; -- cgit v1.2.3-71-gd317 From 580f236737d13ee25d5b0b1d124f50014fe6833b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 7 Feb 2022 13:37:00 -0500 Subject: NFS: Adjust the amount of readahead performed by NFS readdir The current NFS readdir code will always try to maximise the amount of readahead it performs on the assumption that we can cache anything that isn't immediately read by the process. There are several cases where this assumption breaks down, including when the 'ls -l' heuristic kicks in to try to force use of readdirplus as a batch replacement for lookup/getattr. This patch therefore tries to tone down the amount of readahead we perform, and adjust it to try to match the amount of data being requested by user space. 
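A stand-alone sketch of the adjustment logic under assumed constants (the real cap is the server's dtsize and the floor mirrors NFS_MIN_FILE_IO_SIZE; the values below are made up for the demo): the request size doubles while the reader keeps consuming and halves after a short read, always clamped between the floor and the cap:

    #include <stdio.h>

    #define SERVER_DTSIZE (1u << 20) /* assumed server cap */
    #define MIN_IO_SIZE   4096u      /* stands in for NFS_MIN_FILE_IO_SIZE */
    #define INIT_DTSIZE   4096u      /* stands in for NFS_INIT_DTSIZE */

    static unsigned int clamp_dtsize(unsigned int sz)
    {
        if (sz > SERVER_DTSIZE)
            sz = SERVER_DTSIZE;
        if (sz < MIN_IO_SIZE)
            sz = MIN_IO_SIZE;
        return sz;
    }

    int main(void)
    {
        unsigned int dtsize = INIT_DTSIZE;
        int i;

        /* The reader keeps consuming: grow each READDIR request. */
        for (i = 0; i < 10; i++)
            dtsize = clamp_dtsize(dtsize << 1);
        printf("after sustained reads: %u\n", dtsize); /* hits the cap */

        /* The reader stopped early: shrink the next request. */
        dtsize = clamp_dtsize(dtsize >> 1);
        printf("after a short read: %u\n", dtsize);
        return 0;
    }

Doubling and halving gives cheap exponential adaptation: a few iterations are enough to reach either bound, and a single short read immediately backs off.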
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/nfs_fs.h | 1 + 2 files changed, 53 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 60f7feee0a16..520dc3ec4aef 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -69,6 +69,8 @@ const struct address_space_operations nfs_dir_aops = { .freepage = nfs_readdir_clear_array, }; +#define NFS_INIT_DTSIZE PAGE_SIZE + static struct nfs_open_dir_context * alloc_nfs_open_dir_context(struct inode *dir) { @@ -78,6 +80,7 @@ alloc_nfs_open_dir_context(struct inode *dir) ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT); if (ctx != NULL) { ctx->attr_gencount = nfsi->attr_gencount; + ctx->dtsize = NFS_INIT_DTSIZE; spin_lock(&dir->i_lock); if (list_empty(&nfsi->open_files) && (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER)) @@ -154,6 +157,7 @@ struct nfs_readdir_descriptor { struct page *page; struct dir_context *ctx; pgoff_t page_index; + pgoff_t page_index_max; u64 dir_cookie; u64 last_cookie; u64 dup_cookie; @@ -166,12 +170,36 @@ struct nfs_readdir_descriptor { unsigned long gencount; unsigned long attr_gencount; unsigned int cache_entry_index; + unsigned int buffer_fills; + unsigned int dtsize; signed char duped; bool plus; bool eob; bool eof; }; +static void nfs_set_dtsize(struct nfs_readdir_descriptor *desc, unsigned int sz) +{ + struct nfs_server *server = NFS_SERVER(file_inode(desc->file)); + unsigned int maxsize = server->dtsize; + + if (sz > maxsize) + sz = maxsize; + if (sz < NFS_MIN_FILE_IO_SIZE) + sz = NFS_MIN_FILE_IO_SIZE; + desc->dtsize = sz; +} + +static void nfs_shrink_dtsize(struct nfs_readdir_descriptor *desc) +{ + nfs_set_dtsize(desc, desc->dtsize >> 1); +} + +static void nfs_grow_dtsize(struct nfs_readdir_descriptor *desc) +{ + nfs_set_dtsize(desc, desc->dtsize << 1); +} + static void nfs_readdir_array_init(struct nfs_cache_array *array) { memset(array, 0, sizeof(struct nfs_cache_array)); @@ -784,6 +812,7 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, break; arrays++; *arrays = page = new; + desc->page_index_max++; } else { new = nfs_readdir_page_get_next(mapping, page->index + 1, @@ -793,6 +822,7 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, if (page != *arrays) nfs_readdir_page_unlock_and_put(page); page = new; + desc->page_index_max = new->index; } status = nfs_readdir_add_to_array(entry, page); } while (!status && !entry->eof); @@ -858,7 +888,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, struct nfs_entry *entry; size_t array_size; struct inode *inode = file_inode(desc->file); - size_t dtsize = NFS_SERVER(inode)->dtsize; + unsigned int dtsize = desc->dtsize; int status = -ENOMEM; entry = kzalloc(sizeof(*entry), GFP_KERNEL); @@ -894,6 +924,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, status = nfs_readdir_page_filler(desc, entry, pages, pglen, arrays, narrays); + desc->buffer_fills++; } while (!status && nfs_readdir_page_needs_filling(page) && page_mapping(page)); @@ -941,6 +972,10 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) if (!desc->page) return -ENOMEM; if (nfs_readdir_page_needs_filling(desc->page)) { + /* Grow the dtsize if we had to go back for more pages */ + if (desc->page_index == desc->page_index_max) + nfs_grow_dtsize(desc); + desc->page_index_max = desc->page_index; res = nfs_readdir_xdr_to_array(desc, nfsi->cookieverf, verf, &desc->page, 1); if (res < 0) 
{ @@ -1075,6 +1110,7 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc) desc->cache_entry_index = 0; desc->last_cookie = desc->dir_cookie; desc->duped = 0; + desc->page_index_max = 0; status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz); @@ -1084,10 +1120,22 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc) } desc->page = NULL; + /* + * Grow the dtsize if we have to go back for more pages, + * or shrink it if we're reading too many. + */ + if (!desc->eof) { + if (!desc->eob) + nfs_grow_dtsize(desc); + else if (desc->buffer_fills == 1 && + i < (desc->page_index_max >> 1)) + nfs_shrink_dtsize(desc); + } for (i = 0; i < sz && arrays[i]; i++) nfs_readdir_page_array_free(arrays[i]); out: + desc->page_index_max = -1; kfree(arrays); dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status); return status; @@ -1126,6 +1174,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->file = file; desc->ctx = ctx; desc->plus = nfs_use_readdirplus(inode, ctx); + desc->page_index_max = -1; spin_lock(&file->f_lock); desc->dir_cookie = dir_ctx->dir_cookie; @@ -1136,6 +1185,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->last_cookie = dir_ctx->last_cookie; desc->attr_gencount = dir_ctx->attr_gencount; desc->eof = dir_ctx->eof; + nfs_set_dtsize(desc, dir_ctx->dtsize); memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); spin_unlock(&file->f_lock); @@ -1187,6 +1237,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->page_index; dir_ctx->eof = desc->eof; + dir_ctx->dtsize = desc->dtsize; memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf)); spin_unlock(&file->f_lock); out_free: diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 1c533f2c1f36..691a27936849 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -107,6 +107,7 @@ struct nfs_open_dir_context { __u64 dup_cookie; __u64 last_cookie; pgoff_t page_index; + unsigned int dtsize; signed char duped; bool eof; }; -- cgit v1.2.3-71-gd317 From 230bc98f7a2a49eb472d184bdec91fd3096384b3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 17 Feb 2022 11:08:24 -0500 Subject: NFS: Improve heuristic for readdirplus The heuristic for readdirplus is designed to try to detect 'ls -l' and similar patterns. It does so by looking for cache hit/miss patterns in both the attribute cache and in the dcache of the files in a given directory, and then sets a flag for the readdirplus code to interpret. The problem with this approach is that a single attribute or dcache miss can cause the NFS code to force a refresh of the attributes for the entire set of files contained in the directory. To be able to make a more nuanced decision, let's sample the number of hits and misses in the set of open directory descriptors. That allows us to set thresholds at which we start preferring READDIRPLUS over regular READDIR, or at which we start to force a re-read of the remaining readdir cache using READDIRPLUS. 
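A small C sketch of the sampling scheme (the thresholds are the ones introduced by the patch; the surrounding scaffolding is hypothetical): hits and misses accumulate in atomic counters on the open directory context, and each readdir() call drains them with an atomic exchange before deciding:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CACHE_USAGE_THRESHOLD 8u  /* NFS_READDIR_CACHE_USAGE_THRESHOLD */
    #define CACHE_MISS_THRESHOLD 16u  /* NFS_READDIR_CACHE_MISS_THRESHOLD */

    static atomic_uint cache_hits, cache_misses;

    /* Bumped from the lookup/getattr paths. */
    static void record_hit(void)  { atomic_fetch_add(&cache_hits, 1); }
    static void record_miss(void) { atomic_fetch_add(&cache_misses, 1); }

    int main(void)
    {
        int i;

        for (i = 0; i < 5; i++)  record_hit();
        for (i = 0; i < 20; i++) record_miss();

        /* readdir() samples and resets the counters atomically. */
        unsigned int hits = atomic_exchange(&cache_hits, 0);
        unsigned int misses = atomic_exchange(&cache_misses, 0);

        bool use_readdirplus = hits + misses > CACHE_USAGE_THRESHOLD;
        bool force_reread = misses > CACHE_MISS_THRESHOLD;
        printf("plus=%d force_reread=%d\n", use_readdirplus, force_reread);
        return 0;
    }

Sampling with an exchange rather than a plain read means each readdir() call sees only the activity since the previous call, so one burst of misses cannot keep forcing READDIRPLUS forever.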
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 80 ++++++++++++++++++++++++++++++++------------------ fs/nfs/inode.c | 4 +-- fs/nfs/internal.h | 4 +-- fs/nfs/nfstrace.h | 1 - include/linux/nfs_fs.h | 5 ++-- 5 files changed, 58 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index dc6acfd14fc7..098fc1bdaac8 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -87,8 +87,7 @@ alloc_nfs_open_dir_context(struct inode *dir) nfs_set_cache_invalid(dir, NFS_INO_INVALID_DATA | NFS_INO_REVAL_FORCED); - list_add(&ctx->list, &nfsi->open_files); - clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags); + list_add_tail_rcu(&ctx->list, &nfsi->open_files); memcpy(ctx->verf, nfsi->cookieverf, sizeof(ctx->verf)); spin_unlock(&dir->i_lock); return ctx; @@ -99,9 +98,9 @@ alloc_nfs_open_dir_context(struct inode *dir) static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_context *ctx) { spin_lock(&dir->i_lock); - list_del(&ctx->list); + list_del_rcu(&ctx->list); spin_unlock(&dir->i_lock); - kfree(ctx); + kfree_rcu(ctx, rcu_head); } /* @@ -594,7 +593,6 @@ static int nfs_readdir_xdr_filler(struct nfs_readdir_descriptor *desc, /* We requested READDIRPLUS, but the server doesn't grok it */ if (error == -ENOTSUPP && desc->plus) { NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS; - clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags); desc->plus = arg.plus = false; goto again; } @@ -644,51 +642,63 @@ int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) return 1; } -static -bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx) +#define NFS_READDIR_CACHE_USAGE_THRESHOLD (8UL) + +static bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx, + unsigned int cache_hits, + unsigned int cache_misses) { if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS)) return false; - if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags)) - return true; - if (ctx->pos == 0) + if (ctx->pos == 0 || + cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD) return true; return false; } /* - * This function is called by the lookup and getattr code to request the + * This function is called by the getattr code to request the * use of readdirplus to accelerate any future lookups in the same * directory. */ -void nfs_advise_use_readdirplus(struct inode *dir) +void nfs_readdir_record_entry_cache_hit(struct inode *dir) { struct nfs_inode *nfsi = NFS_I(dir); + struct nfs_open_dir_context *ctx; if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) && - !list_empty(&nfsi->open_files)) - set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags); + S_ISDIR(dir->i_mode)) { + rcu_read_lock(); + list_for_each_entry_rcu (ctx, &nfsi->open_files, list) + atomic_inc(&ctx->cache_hits); + rcu_read_unlock(); + } } /* * This function is mainly for use by nfs_getattr(). * * If this is an 'ls -l', we want to force use of readdirplus. - * Do this by checking if there is an active file descriptor - * and calling nfs_advise_use_readdirplus, then forcing a - * cache flush. 
*/ -void nfs_force_use_readdirplus(struct inode *dir) +void nfs_readdir_record_entry_cache_miss(struct inode *dir) { struct nfs_inode *nfsi = NFS_I(dir); + struct nfs_open_dir_context *ctx; if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) && - !list_empty(&nfsi->open_files)) { - set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags); - set_bit(NFS_INO_FORCE_READDIR, &nfsi->flags); + S_ISDIR(dir->i_mode)) { + rcu_read_lock(); + list_for_each_entry_rcu (ctx, &nfsi->open_files, list) + atomic_inc(&ctx->cache_misses); + rcu_read_unlock(); } } +static void nfs_lookup_advise_force_readdirplus(struct inode *dir) +{ + nfs_readdir_record_entry_cache_miss(dir); +} + static void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry, unsigned long dir_verifier) @@ -1122,6 +1132,19 @@ out: return status; } +#define NFS_READDIR_CACHE_MISS_THRESHOLD (16UL) + +static void nfs_readdir_handle_cache_misses(struct inode *inode, + struct nfs_readdir_descriptor *desc, + pgoff_t page_index, + unsigned int cache_misses) +{ + if (desc->ctx->pos == 0 || + cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD) + return; + invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1); +} + /* The file offset position represents the dirent entry number. A last cookie cache takes care of the common case of reading the whole directory. @@ -1133,6 +1156,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) struct nfs_inode *nfsi = NFS_I(inode); struct nfs_open_dir_context *dir_ctx = file->private_data; struct nfs_readdir_descriptor *desc; + unsigned int cache_hits, cache_misses; pgoff_t page_index; int res; @@ -1154,7 +1178,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) goto out; desc->file = file; desc->ctx = ctx; - desc->plus = nfs_use_readdirplus(inode, ctx); desc->page_index_max = -1; spin_lock(&file->f_lock); @@ -1168,6 +1191,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->eof = dir_ctx->eof; nfs_set_dtsize(desc, dir_ctx->dtsize); memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); + cache_hits = atomic_xchg(&dir_ctx->cache_hits, 0); + cache_misses = atomic_xchg(&dir_ctx->cache_misses, 0); spin_unlock(&file->f_lock); if (desc->eof) { @@ -1175,9 +1200,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) goto out_free; } - if (test_and_clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags) && - list_is_singular(&nfsi->open_files)) - invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1); + desc->plus = nfs_use_readdirplus(inode, ctx, cache_hits, cache_misses); + nfs_readdir_handle_cache_misses(inode, desc, page_index, cache_misses); do { res = readdir_search_pagecache(desc); @@ -1196,7 +1220,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) break; } if (res == -ETOOSMALL && desc->plus) { - clear_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags); nfs_zap_caches(inode); desc->page_index = 0; desc->plus = false; @@ -1610,7 +1633,7 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry, nfs_set_verifier(dentry, dir_verifier); /* set a readdirplus hint that we had a cache miss */ - nfs_force_use_readdirplus(dir); + nfs_lookup_advise_force_readdirplus(dir); ret = 1; out: nfs_free_fattr(fattr); @@ -1667,7 +1690,6 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, nfs_mark_dir_for_revalidate(dir); goto out_bad; } - nfs_advise_use_readdirplus(dir); goto out_valid; } @@ -1872,7 +1894,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in goto out; /* Notify 
readdir to use READDIRPLUS */ - nfs_force_use_readdirplus(dir); + nfs_lookup_advise_force_readdirplus(dir); no_entry: res = d_splice_alias(inode, dentry); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 7cecabf57b95..bbf4357ff727 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -787,7 +787,7 @@ static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry) if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS)) return; parent = dget_parent(dentry); - nfs_force_use_readdirplus(d_inode(parent)); + nfs_readdir_record_entry_cache_miss(d_inode(parent)); dput(parent); } @@ -798,7 +798,7 @@ static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry) if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS)) return; parent = dget_parent(dentry); - nfs_advise_use_readdirplus(d_inode(parent)); + nfs_readdir_record_entry_cache_hit(d_inode(parent)); dput(parent); } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index b5398af53c7f..194840a97e3a 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -366,8 +366,8 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp, const struct nfs_client_initdata *); /* dir.c */ -extern void nfs_advise_use_readdirplus(struct inode *dir); -extern void nfs_force_use_readdirplus(struct inode *dir); +extern void nfs_readdir_record_entry_cache_hit(struct inode *dir); +extern void nfs_readdir_record_entry_cache_miss(struct inode *dir); extern unsigned long nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc); extern unsigned long nfs_access_cache_scan(struct shrinker *shrink, diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 45a310b586ce..3672f6703ee7 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -36,7 +36,6 @@ #define nfs_show_nfsi_flags(v) \ __print_flags(v, "|", \ - { BIT(NFS_INO_ADVISE_RDPLUS), "ADVISE_RDPLUS" }, \ { BIT(NFS_INO_STALE), "STALE" }, \ { BIT(NFS_INO_ACL_LRU_SET), "ACL_LRU_SET" }, \ { BIT(NFS_INO_INVALIDATING), "INVALIDATING" }, \ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 691a27936849..20a4cf0acad2 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -101,6 +101,8 @@ struct nfs_open_context { struct nfs_open_dir_context { struct list_head list; + atomic_t cache_hits; + atomic_t cache_misses; unsigned long attr_gencount; __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; @@ -110,6 +112,7 @@ struct nfs_open_dir_context { unsigned int dtsize; signed char duped; bool eof; + struct rcu_head rcu_head; }; /* @@ -274,13 +277,11 @@ struct nfs4_copy_state { /* * Bit offsets in flags field */ -#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */ #define NFS_INO_STALE (1) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ #define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ -#define NFS_INO_FORCE_READDIR (7) /* force readdirplus */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ -- cgit v1.2.3-71-gd317 From f648022faa68ef76058aa121d1aa3a967d59cae8 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 23 Feb 2022 11:31:51 -0500 Subject: NFS: Convert readdir page cache to use a cookie based index Instead of using a linear index to address the pages, use the cookie of the first entry, since that 
is what we use to match the page anyway. This allows us to avoid re-reading the entire cache on a seekdir() type of operation. The latter is very common when re-exporting NFS, and is a major performance drain. The change does affect our duplicate cookie detection, since we can no longer rely on the page index as a linear offset for detecting whether we looped backwards. However, since we no longer do a linear search through all the pages on each call to nfs_readdir(), this is less of a concern than it was previously. The other downside is that invalidate_mapping_pages() can no longer use the page index to avoid clearing pages that have been read. A subsequent patch will restore the functionality this provides to the 'ls -l' heuristic. Signed-off-by: Trond Myklebust --- fs/nfs/Kconfig | 4 ++ fs/nfs/dir.c | 149 +++++++++++++++++++++---------------------- include/linux/nfs_fs.h | 2 - 3 files changed, 69 insertions(+), 86 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 14a72224b657..47a53b3362b6 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -4,6 +4,10 @@ config NFS_FS depends on INET && FILE_LOCKING && MULTIUSER select LOCKD select SUNRPC + select CRYPTO + select CRYPTO_HASH + select XXHASH + select CRYPTO_XXHASH select NFS_ACL_SUPPORT if NFS_V3_ACL help Choose Y here if you want to access files residing on other diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 4983950de2ad..8c2552d89310 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "delegation.h" #include "iostat.h" @@ -159,9 +160,7 @@ struct nfs_readdir_descriptor { pgoff_t page_index_max; u64 dir_cookie; u64 last_cookie; - u64 dup_cookie; loff_t current_index; - loff_t prev_index; __be32 verf[NFS_DIR_VERIFIER_SIZE]; unsigned long dir_verifier; @@ -171,7 +170,6 @@ struct nfs_readdir_descriptor { unsigned int cache_entry_index; unsigned int buffer_fills; unsigned int dtsize; - signed char duped; bool plus; bool eob; bool eof; @@ -331,6 +329,28 @@ out: return ret; } +#define NFS_READDIR_COOKIE_MASK (U32_MAX >> 14) +/* + * Hash algorithm allowing content addressable access to sequences + * of directory cookies. Content is addressed by the value of the + * cookie index of the first readdir entry in a page. + * + * The xxhash algorithm is chosen because it is fast, and is supposed + * to result in a decent flat distribution of hashes. + * + * We then select only the first 18 bits to avoid issues with excessive + * memory use for the page cache XArray. 18 bits should allow the caching + * of 262144 pages of sequences of readdir entries. Since each page holds + * 127 readdir entries for a typical 64-bit system, that works out to a + * cache of ~ 33 million entries per directory.
+ */ +static pgoff_t nfs_readdir_page_cookie_hash(u64 cookie) +{ + if (cookie == 0) + return 0; + return xxhash(&cookie, sizeof(cookie), 0) & NFS_READDIR_COOKIE_MASK; +} + static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie, u64 change_attr) { @@ -352,15 +372,15 @@ static void nfs_readdir_page_unlock_and_put(struct page *page) } static struct page *nfs_readdir_page_get_locked(struct address_space *mapping, - pgoff_t index, u64 last_cookie) + u64 last_cookie, + u64 change_attr) { + pgoff_t index = nfs_readdir_page_cookie_hash(last_cookie); struct page *page; - u64 change_attr; page = grab_cache_page(mapping, index); if (!page) return NULL; - change_attr = inode_peek_iversion_raw(mapping->host); if (PageUptodate(page)) { if (nfs_readdir_page_validate(page, last_cookie, change_attr)) return page; @@ -371,11 +391,6 @@ static struct page *nfs_readdir_page_get_locked(struct address_space *mapping, return page; } -static loff_t nfs_readdir_page_offset(struct page *page) -{ - return (loff_t)page->index * (loff_t)nfs_readdir_array_maxentries(); -} - static u64 nfs_readdir_page_last_cookie(struct page *page) { struct nfs_cache_array *array; @@ -408,11 +423,11 @@ static void nfs_readdir_page_set_eof(struct page *page) } static struct page *nfs_readdir_page_get_next(struct address_space *mapping, - pgoff_t index, u64 cookie) + u64 cookie, u64 change_attr) { struct page *page; - page = nfs_readdir_page_get_locked(mapping, index, cookie); + page = nfs_readdir_page_get_locked(mapping, cookie, change_attr); if (page) { if (nfs_readdir_page_last_cookie(page) == cookie) return page; @@ -452,6 +467,13 @@ static void nfs_readdir_seek_next_array(struct nfs_cache_array *array, desc->last_cookie = array->array[0].cookie; } +static void nfs_readdir_rewind_search(struct nfs_readdir_descriptor *desc) +{ + desc->current_index = 0; + desc->last_cookie = 0; + desc->page_index = 0; +} + static int nfs_readdir_search_for_pos(struct nfs_cache_array *array, struct nfs_readdir_descriptor *desc) { @@ -492,8 +514,7 @@ static bool nfs_readdir_array_cookie_in_range(struct nfs_cache_array *array, static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, struct nfs_readdir_descriptor *desc) { - int i; - loff_t new_pos; + unsigned int i; int status = -EAGAIN; if (!nfs_readdir_array_cookie_in_range(array, desc->dir_cookie)) @@ -501,32 +522,10 @@ static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, for (i = 0; i < array->size; i++) { if (array->array[i].cookie == desc->dir_cookie) { - struct nfs_inode *nfsi = NFS_I(file_inode(desc->file)); - - new_pos = nfs_readdir_page_offset(desc->page) + i; - if (desc->attr_gencount != nfsi->attr_gencount) { - desc->duped = 0; - desc->attr_gencount = nfsi->attr_gencount; - } else if (new_pos < desc->prev_index) { - if (desc->duped > 0 - && desc->dup_cookie == desc->dir_cookie) { - if (printk_ratelimit()) { - pr_notice("NFS: directory %pD2 contains a readdir loop." - "Please contact your server vendor. 
" - "The file: %s has duplicate cookie %llu\n", - desc->file, array->array[i].name, desc->dir_cookie); - } - status = -ELOOP; - goto out; - } - desc->dup_cookie = desc->dir_cookie; - desc->duped = -1; - } if (nfs_readdir_use_cookie(desc->file)) desc->ctx->pos = desc->dir_cookie; else - desc->ctx->pos = new_pos; - desc->prev_index = new_pos; + desc->ctx->pos = desc->current_index + i; desc->cache_entry_index = i; return 0; } @@ -538,7 +537,6 @@ check_eof: desc->eof = true; } else nfs_readdir_seek_next_array(array, desc); -out: return status; } @@ -785,10 +783,9 @@ out: /* Perform conversion from xdr to cache array */ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, struct nfs_entry *entry, - struct page **xdr_pages, - unsigned int buflen, - struct page **arrays, - size_t narrays) + struct page **xdr_pages, unsigned int buflen, + struct page **arrays, size_t narrays, + u64 change_attr) { struct address_space *mapping = desc->file->f_mapping; struct xdr_stream stream; @@ -828,18 +825,16 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, break; arrays++; *arrays = page = new; - desc->page_index_max++; } else { - new = nfs_readdir_page_get_next(mapping, - page->index + 1, - entry->prev_cookie); + new = nfs_readdir_page_get_next( + mapping, entry->prev_cookie, change_attr); if (!new) break; if (page != *arrays) nfs_readdir_page_unlock_and_put(page); page = new; - desc->page_index_max = new->index; } + desc->page_index_max++; status = nfs_readdir_add_to_array(entry, page); } while (!status && !entry->eof); @@ -899,6 +894,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, __be32 *verf_arg, __be32 *verf_res, struct page **arrays, size_t narrays) { + u64 change_attr; struct page **pages; struct page *page = *arrays; struct nfs_entry *entry; @@ -923,6 +919,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, if (!pages) goto out; + change_attr = inode_peek_iversion_raw(inode); status = nfs_readdir_xdr_filler(desc, verf_arg, entry->cookie, pages, dtsize, verf_res); if (status < 0) @@ -931,7 +928,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc, pglen = status; if (pglen != 0) status = nfs_readdir_page_filler(desc, entry, pages, pglen, - arrays, narrays); + arrays, narrays, change_attr); else nfs_readdir_page_set_eof(page); desc->buffer_fills++; @@ -961,9 +958,11 @@ nfs_readdir_page_unlock_and_put_cached(struct nfs_readdir_descriptor *desc) static struct page * nfs_readdir_page_get_cached(struct nfs_readdir_descriptor *desc) { - return nfs_readdir_page_get_locked(desc->file->f_mapping, - desc->page_index, - desc->last_cookie); + struct address_space *mapping = desc->file->f_mapping; + u64 change_attr = inode_peek_iversion_raw(mapping->host); + + return nfs_readdir_page_get_locked(mapping, desc->last_cookie, + change_attr); } /* @@ -995,7 +994,7 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) trace_nfs_readdir_cache_fill_done(inode, res); if (res == -EBADCOOKIE || res == -ENOTSYNC) { invalidate_inode_pages2(desc->file->f_mapping); - desc->page_index = 0; + nfs_readdir_rewind_search(desc); trace_nfs_readdir_invalidate_cache_range( inode, 0, MAX_LFS_FILESIZE); return -EAGAIN; @@ -1009,12 +1008,10 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) memcmp(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf))) { memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf)); - invalidate_inode_pages2_range(desc->file->f_mapping, - 
desc->page_index_max + 1, + invalidate_inode_pages2_range(desc->file->f_mapping, 1, -1); trace_nfs_readdir_invalidate_cache_range( - inode, desc->page_index_max + 1, - MAX_LFS_FILESIZE); + inode, 1, MAX_LFS_FILESIZE); } } res = nfs_readdir_search_array(desc); @@ -1030,11 +1027,6 @@ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc) int res; do { - if (desc->page_index == 0) { - desc->current_index = 0; - desc->prev_index = 0; - desc->last_cookie = 0; - } res = find_and_lock_cache_page(desc); } while (res == -EAGAIN); return res; @@ -1072,8 +1064,6 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc, desc->ctx->pos = desc->dir_cookie; else desc->ctx->pos++; - if (desc->duped != 0) - desc->duped = 1; } if (array->page_is_eof) desc->eof = !desc->eob; @@ -1115,7 +1105,6 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc) desc->page_index = 0; desc->cache_entry_index = 0; desc->last_cookie = desc->dir_cookie; - desc->duped = 0; desc->page_index_max = 0; trace_nfs_readdir_uncached(desc->file, desc->verf, desc->last_cookie, @@ -1148,6 +1137,8 @@ out_free: for (i = 0; i < sz && arrays[i]; i++) nfs_readdir_page_array_free(arrays[i]); out: + if (!nfs_readdir_use_cookie(desc->file)) + nfs_readdir_rewind_search(desc); desc->page_index_max = -1; kfree(arrays); dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status); @@ -1158,17 +1149,14 @@ out: static void nfs_readdir_handle_cache_misses(struct inode *inode, struct nfs_readdir_descriptor *desc, - pgoff_t page_index, unsigned int cache_misses) { if (desc->ctx->pos == 0 || cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD) return; - if (invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1) == 0) + if (invalidate_mapping_pages(inode->i_mapping, 0, -1) == 0) return; - trace_nfs_readdir_invalidate_cache_range( - inode, (loff_t)(page_index + 1) << PAGE_SHIFT, - MAX_LFS_FILESIZE); + trace_nfs_readdir_invalidate_cache_range(inode, 0, MAX_LFS_FILESIZE); } /* The file offset position represents the dirent entry number. 
A @@ -1183,7 +1171,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) struct nfs_open_dir_context *dir_ctx = file->private_data; struct nfs_readdir_descriptor *desc; unsigned int cache_hits, cache_misses; - pgoff_t page_index; int res; dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n", @@ -1208,10 +1195,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) spin_lock(&file->f_lock); desc->dir_cookie = dir_ctx->dir_cookie; - desc->dup_cookie = dir_ctx->dup_cookie; - desc->duped = dir_ctx->duped; - page_index = dir_ctx->page_index; - desc->page_index = page_index; + desc->page_index = dir_ctx->page_index; desc->last_cookie = dir_ctx->last_cookie; desc->attr_gencount = dir_ctx->attr_gencount; desc->eof = dir_ctx->eof; @@ -1227,7 +1211,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) } desc->plus = nfs_use_readdirplus(inode, ctx, cache_hits, cache_misses); - nfs_readdir_handle_cache_misses(inode, desc, page_index, cache_misses); + nfs_readdir_handle_cache_misses(inode, desc, cache_misses); do { res = readdir_search_pagecache(desc); @@ -1247,7 +1231,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) } if (res == -ETOOSMALL && desc->plus) { nfs_zap_caches(inode); - desc->page_index = 0; desc->plus = false; desc->eof = false; continue; @@ -1261,9 +1244,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) spin_lock(&file->f_lock); dir_ctx->dir_cookie = desc->dir_cookie; - dir_ctx->dup_cookie = desc->dup_cookie; dir_ctx->last_cookie = desc->last_cookie; - dir_ctx->duped = desc->duped; dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->page_index; dir_ctx->eof = desc->eof; @@ -1306,13 +1287,13 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence) if (offset != filp->f_pos) { filp->f_pos = offset; dir_ctx->page_index = 0; - if (!nfs_readdir_use_cookie(filp)) + if (!nfs_readdir_use_cookie(filp)) { dir_ctx->dir_cookie = 0; - else + dir_ctx->last_cookie = 0; + } else { dir_ctx->dir_cookie = offset; - if (offset == 0) - memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf)); - dir_ctx->duped = 0; + dir_ctx->last_cookie = offset; + } dir_ctx->eof = false; } spin_unlock(&filp->f_lock); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 20a4cf0acad2..42aad886d3c0 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -106,11 +106,9 @@ struct nfs_open_dir_context { unsigned long attr_gencount; __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; - __u64 dup_cookie; __u64 last_cookie; pgoff_t page_index; unsigned int dtsize; - signed char duped; bool eof; struct rcu_head rcu_head; }; -- cgit v1.2.3-71-gd317 From b0365ccb0712efacf99936e94e92eb7ae63de4d5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 23 Feb 2022 13:29:59 -0500 Subject: NFS: Fix up forced readdirplus Avoid clearing the entire readdir page cache if we're just doing forced readdirplus for the 'ls -l' heuristic. 
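For illustration, a minimal sketch (not part of the patch itself) of the intended behaviour: instead of invalidating the whole mapping, the descriptor carries a clear_cache flag and each cached page is re-initialised in place as it is revisited, so unrelated cached pages survive. The names follow the patch below; the wrapper function is hypothetical.

/* Hypothetical wrapper, assuming the clear_cache flag and the
 * nfs_readdir_page_reinit_array() helper introduced by this patch:
 * re-fill only the page currently being revisited rather than
 * calling invalidate_mapping_pages() on the entire mapping. */
static struct page *
example_get_page_for_refill(struct nfs_readdir_descriptor *desc,
			    struct page *page, u64 cookie, u64 change_attr)
{
	if (desc->clear_cache && !nfs_readdir_page_needs_filling(page))
		nfs_readdir_page_reinit_array(page, cookie, change_attr);
	return page;
}
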
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 56 +++++++++++++++++++++++++++++++++++--------------- fs/nfs/nfstrace.h | 1 + include/linux/nfs_fs.h | 1 + 3 files changed, 41 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8c2552d89310..f6aac1e8a8b9 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -170,6 +170,7 @@ struct nfs_readdir_descriptor { unsigned int cache_entry_index; unsigned int buffer_fills; unsigned int dtsize; + bool clear_cache; bool plus; bool eob; bool eof; @@ -227,6 +228,13 @@ static void nfs_readdir_clear_array(struct page *page) kunmap_atomic(array); } +static void nfs_readdir_page_reinit_array(struct page *page, u64 last_cookie, + u64 change_attr) +{ + nfs_readdir_clear_array(page); + nfs_readdir_page_init_array(page, last_cookie, change_attr); +} + static struct page * nfs_readdir_page_array_alloc(u64 last_cookie, gfp_t gfp_flags) { @@ -428,12 +436,11 @@ static struct page *nfs_readdir_page_get_next(struct address_space *mapping, struct page *page; page = nfs_readdir_page_get_locked(mapping, cookie, change_attr); - if (page) { - if (nfs_readdir_page_last_cookie(page) == cookie) - return page; - nfs_readdir_page_unlock_and_put(page); - } - return NULL; + if (!page) + return NULL; + if (nfs_readdir_page_last_cookie(page) != cookie) + nfs_readdir_page_reinit_array(page, cookie, change_attr); + return page; } static inline @@ -960,9 +967,15 @@ nfs_readdir_page_get_cached(struct nfs_readdir_descriptor *desc) { struct address_space *mapping = desc->file->f_mapping; u64 change_attr = inode_peek_iversion_raw(mapping->host); + u64 cookie = desc->last_cookie; + struct page *page; - return nfs_readdir_page_get_locked(mapping, desc->last_cookie, - change_attr); + page = nfs_readdir_page_get_locked(mapping, cookie, change_attr); + if (!page) + return NULL; + if (desc->clear_cache && !nfs_readdir_page_needs_filling(page)) + nfs_readdir_page_reinit_array(page, cookie, change_attr); + return page; } /* @@ -1013,6 +1026,7 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc) trace_nfs_readdir_invalidate_cache_range( inode, 1, MAX_LFS_FILESIZE); } + desc->clear_cache = false; } res = nfs_readdir_search_array(desc); if (res == 0) @@ -1147,16 +1161,17 @@ out: #define NFS_READDIR_CACHE_MISS_THRESHOLD (16UL) -static void nfs_readdir_handle_cache_misses(struct inode *inode, +static bool nfs_readdir_handle_cache_misses(struct inode *inode, struct nfs_readdir_descriptor *desc, - unsigned int cache_misses) + unsigned int cache_misses, + bool force_clear) { - if (desc->ctx->pos == 0 || - cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD) - return; - if (invalidate_mapping_pages(inode->i_mapping, 0, -1) == 0) - return; - trace_nfs_readdir_invalidate_cache_range(inode, 0, MAX_LFS_FILESIZE); + if (desc->ctx->pos == 0 || !desc->plus) + return false; + if (cache_misses <= NFS_READDIR_CACHE_MISS_THRESHOLD && !force_clear) + return false; + trace_nfs_readdir_force_readdirplus(inode); + return true; } /* The file offset position represents the dirent entry number. 
A @@ -1171,6 +1186,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) struct nfs_open_dir_context *dir_ctx = file->private_data; struct nfs_readdir_descriptor *desc; unsigned int cache_hits, cache_misses; + bool force_clear; int res; dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n", @@ -1203,6 +1219,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf)); cache_hits = atomic_xchg(&dir_ctx->cache_hits, 0); cache_misses = atomic_xchg(&dir_ctx->cache_misses, 0); + force_clear = dir_ctx->force_clear; spin_unlock(&file->f_lock); if (desc->eof) { @@ -1211,7 +1228,9 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) } desc->plus = nfs_use_readdirplus(inode, ctx, cache_hits, cache_misses); - nfs_readdir_handle_cache_misses(inode, desc, cache_misses); + force_clear = nfs_readdir_handle_cache_misses(inode, desc, cache_misses, + force_clear); + desc->clear_cache = force_clear; do { res = readdir_search_pagecache(desc); @@ -1240,6 +1259,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) nfs_do_filldir(desc, nfsi->cookieverf); nfs_readdir_page_unlock_and_put_cached(desc); + if (desc->page_index == desc->page_index_max) + desc->clear_cache = force_clear; } while (!desc->eob && !desc->eof); spin_lock(&file->f_lock); @@ -1247,6 +1268,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) dir_ctx->last_cookie = desc->last_cookie; dir_ctx->attr_gencount = desc->attr_gencount; dir_ctx->page_index = desc->page_index; + dir_ctx->force_clear = force_clear; dir_ctx->eof = desc->eof; dir_ctx->dtsize = desc->dtsize; memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf)); diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index ec2645d20abf..59f4ca803fd0 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -160,6 +160,7 @@ DEFINE_NFS_INODE_EVENT(nfs_fsync_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit); DEFINE_NFS_INODE_EVENT(nfs_access_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid); +DEFINE_NFS_INODE_EVENT(nfs_readdir_force_readdirplus); DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_cache_fill_done); DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_uncached_done); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 42aad886d3c0..3893386ceaed 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -109,6 +109,7 @@ struct nfs_open_dir_context { __u64 last_cookie; pgoff_t page_index; unsigned int dtsize; + bool force_clear; bool eof; struct rcu_head rcu_head; }; -- cgit v1.2.3-71-gd317 From 0adf85b445c7fbc5d2df1f8c1bc54d62c4340237 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 27 Feb 2022 12:46:24 -0500 Subject: NFS: Optimise away the previous cookie field Replace the 'previous cookie' field in struct nfs_entry with the array->last_cookie. 
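As a rough sketch of the pattern (assumptions noted in the comment), the per-page cache array now owns the running cookie: each append records the array's last_cookie as the new entry's cookie and reports it back to the caller, so struct nfs_entry no longer needs to carry a prev_cookie of its own.

/* Sketch only: the container (the cache array) keeps the running
 * "previous cookie" cursor, removing the prev_cookie duplication in
 * struct nfs_entry. 'array' is assumed to be a kmap'd
 * struct nfs_cache_array as in the patch below; capacity and name
 * handling are elided. */
static void example_append(struct nfs_cache_array *array,
			   const struct nfs_entry *entry, u64 *cookie)
{
	struct nfs_cache_array_entry *cache_entry = &array->array[array->size];

	cache_entry->cookie = array->last_cookie;	/* cookie that leads here */
	array->last_cookie = entry->cookie;		/* advance the cursor */
	array->size++;
	*cookie = array->last_cookie;			/* reported for page chaining */
}
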
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 26 ++++++++++++++------------ fs/nfs/nfs2xdr.c | 1 - fs/nfs/nfs3xdr.c | 1 - fs/nfs/nfs4xdr.c | 1 - include/linux/nfs_xdr.h | 3 +-- 5 files changed, 15 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index f6aac1e8a8b9..033249a72e92 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -301,19 +301,20 @@ static int nfs_readdir_array_can_expand(struct nfs_cache_array *array) return 0; } -static -int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) +static int nfs_readdir_page_array_append(struct page *page, + const struct nfs_entry *entry, + u64 *cookie) { struct nfs_cache_array *array; struct nfs_cache_array_entry *cache_entry; const char *name; - int ret; + int ret = -ENOMEM; name = nfs_readdir_copy_name(entry->name, entry->len); - if (!name) - return -ENOMEM; array = kmap_atomic(page); + if (!name) + goto out; ret = nfs_readdir_array_can_expand(array); if (ret) { kfree(name); @@ -321,7 +322,7 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) } cache_entry = &array->array[array->size]; - cache_entry->cookie = entry->prev_cookie; + cache_entry->cookie = array->last_cookie; cache_entry->ino = entry->ino; cache_entry->d_type = entry->d_type; cache_entry->name_len = entry->len; @@ -333,6 +334,7 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) if (entry->eof != 0) nfs_readdir_array_set_eof(array); out: + *cookie = array->last_cookie; kunmap_atomic(array); return ret; } @@ -798,6 +800,7 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, struct xdr_stream stream; struct xdr_buf buf; struct page *scratch, *new, *page = *arrays; + u64 cookie; int status; scratch = alloc_page(GFP_KERNEL); @@ -819,22 +822,21 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, nfs_prime_dcache(file_dentry(desc->file), entry, desc->dir_verifier); - status = nfs_readdir_add_to_array(entry, page); + status = nfs_readdir_page_array_append(page, entry, &cookie); if (status != -ENOSPC) continue; if (page->mapping != mapping) { if (!--narrays) break; - new = nfs_readdir_page_array_alloc(entry->prev_cookie, - GFP_KERNEL); + new = nfs_readdir_page_array_alloc(cookie, GFP_KERNEL); if (!new) break; arrays++; *arrays = page = new; } else { - new = nfs_readdir_page_get_next( - mapping, entry->prev_cookie, change_attr); + new = nfs_readdir_page_get_next(mapping, cookie, + change_attr); if (!new) break; if (page != *arrays) @@ -842,7 +844,7 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc, page = new; } desc->page_index_max++; - status = nfs_readdir_add_to_array(entry, page); + status = nfs_readdir_page_array_append(page, entry, &cookie); } while (!status && !entry->eof); switch (status) { diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 3d5ba43f44bb..05c3b4b2b3dd 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -955,7 +955,6 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, * The type (size and byte order) of nfscookie isn't defined in * RFC 1094. This implementation assumes that it's an XDR uint32. 
*/ - entry->prev_cookie = entry->cookie; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) return -EAGAIN; diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index d6779ceeb39e..3b0b650c9c5a 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -2024,7 +2024,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, zero_nfs_fh3(entry->fh); } - entry->prev_cookie = entry->cookie; entry->cookie = new_cookie; return 0; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index b7780b97dc4d..86a5f6516928 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -7508,7 +7508,6 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); - entry->prev_cookie = entry->cookie; entry->cookie = new_cookie; return 0; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 728cb0c1f0b6..82f7c2730b9a 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -745,8 +745,7 @@ struct nfs_auth_info { */ struct nfs_entry { __u64 ino; - __u64 cookie, - prev_cookie; + __u64 cookie; const char * name; unsigned int len; int eof; -- cgit v1.2.3-71-gd317 From d6097b8d5d55f26cd2244e7e7f00a5a077772a91 Mon Sep 17 00:00:00 2001 From: Nicolai Stange Date: Mon, 21 Feb 2022 13:10:58 +0100 Subject: crypto: api - allow algs only in specific constructions in FIPS mode Currently we do not distinguish between algorithms that fail on the self-test vs. those which are disabled in FIPS mode (not allowed). Both are marked as having failed the self-test. Recently the need arose to allow the usage of certain algorithms only as arguments to specific template instantiations in FIPS mode. For example, standalone "dh" must be blocked, but e.g. "ffdhe2048(dh)" is allowed. Other potential use cases include "cbcmac(aes)", which must only be used with ccm(), or "ghash", which must be used only for gcm(). This patch allows this scenario by adding a new flag FIPS_INTERNAL to indicate those algorithms that are not FIPS-allowed. They can then be used as template arguments only, i.e. when looked up via crypto_grab_spawn() to be more specific. The FIPS_INTERNAL bit gets propagated upwards recursively into the surrounding template instances, until the construction eventually matches an explicit testmgr entry with ->fips_allowed being set, if any. The behaviour to skip !->fips_allowed self-test executions in FIPS mode will be retained. Note that this effectively means that FIPS_INTERNAL algorithms are handled very similarly to the INTERNAL ones in this regard. It is expected that the FIPS_INTERNAL algorithms will receive sufficient testing when the larger constructions they're a part of, if any, get exercised by testmgr. Note that as a side-effect of this patch algorithms which are not FIPS-allowed will now return ENOENT instead of ELIBBAD. Hopefully this is not an issue as some people were relying on this already. 
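To make the intended behaviour concrete, here is a hedged sketch of what a lookup should observe with fips_enabled, assuming a kernel carrying this patch and the ffdhe2048 template from the same series: standalone "dh" is FIPS_INTERNAL and fails, while the approved construction wrapping it resolves normally.

#include <crypto/kpp.h>

/* Illustrative only: expected lookup results in FIPS mode with this
 * patch applied. Standalone "dh" should now come back as -ENOENT
 * (it is FIPS_INTERNAL), while "ffdhe2048(dh)" instantiates the
 * template and is allowed. */
static void example_fips_lookup(void)
{
	struct crypto_kpp *tfm;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		pr_info("dh: %ld (expected -ENOENT in FIPS mode)\n",
			PTR_ERR(tfm));
	else
		crypto_free_kpp(tfm);

	tfm = crypto_alloc_kpp("ffdhe2048(dh)", 0, 0);
	if (!IS_ERR(tfm))
		crypto_free_kpp(tfm);
}
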
Link: https://lore.kernel.org/r/YeEVSaMEVJb3cQkq@gondor.apana.org.au Originally-by: Herbert Xu Signed-off-by: Nicolai Stange Reviewed-by: Hannes Reinecke Signed-off-by: Herbert Xu --- crypto/algapi.c | 18 ++++++++++++++++-- crypto/api.c | 19 +++++++++++++++++-- crypto/tcrypt.c | 4 ++-- crypto/testmgr.c | 23 +++++++++++++++++++---- include/linux/crypto.h | 9 +++++++++ 5 files changed, 63 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/crypto/algapi.c b/crypto/algapi.c index 9f15e11f5d73..53c5149e6abf 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -328,8 +328,16 @@ void crypto_alg_tested(const char *name, int err) found: q->cra_flags |= CRYPTO_ALG_DEAD; alg = test->adult; - if (err || list_empty(&alg->cra_list)) + + if (list_empty(&alg->cra_list)) + goto complete; + + if (err == -ECANCELED) + alg->cra_flags |= CRYPTO_ALG_FIPS_INTERNAL; + else if (err) goto complete; + else + alg->cra_flags &= ~CRYPTO_ALG_FIPS_INTERNAL; alg->cra_flags |= CRYPTO_ALG_TESTED; @@ -610,6 +618,7 @@ int crypto_register_instance(struct crypto_template *tmpl, { struct crypto_larval *larval; struct crypto_spawn *spawn; + u32 fips_internal = 0; int err; err = crypto_check_alg(&inst->alg); @@ -632,11 +641,15 @@ int crypto_register_instance(struct crypto_template *tmpl, spawn->inst = inst; spawn->registered = true; + fips_internal |= spawn->alg->cra_flags; + crypto_mod_put(spawn->alg); spawn = next; } + inst->alg.cra_flags |= (fips_internal & CRYPTO_ALG_FIPS_INTERNAL); + larval = __crypto_register_alg(&inst->alg); if (IS_ERR(larval)) goto unlock; @@ -689,7 +702,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, if (IS_ERR(name)) return PTR_ERR(name); - alg = crypto_find_alg(name, spawn->frontend, type, mask); + alg = crypto_find_alg(name, spawn->frontend, + type | CRYPTO_ALG_FIPS_INTERNAL, mask); if (IS_ERR(alg)) return PTR_ERR(alg); diff --git a/crypto/api.c b/crypto/api.c index cf0869dd130b..549f9aced1da 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -223,6 +223,8 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) else if (crypto_is_test_larval(larval) && !(alg->cra_flags & CRYPTO_ALG_TESTED)) alg = ERR_PTR(-EAGAIN); + else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL) + alg = ERR_PTR(-EAGAIN); else if (!crypto_mod_get(alg)) alg = ERR_PTR(-EAGAIN); crypto_mod_put(&larval->alg); @@ -233,6 +235,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) { + const u32 fips = CRYPTO_ALG_FIPS_INTERNAL; struct crypto_alg *alg; u32 test = 0; @@ -240,8 +243,20 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, test |= CRYPTO_ALG_TESTED; down_read(&crypto_alg_sem); - alg = __crypto_alg_lookup(name, type | test, mask | test); - if (!alg && test) { + alg = __crypto_alg_lookup(name, (type | test) & ~fips, + (mask | test) & ~fips); + if (alg) { + if (((type | mask) ^ fips) & fips) + mask |= fips; + mask &= fips; + + if (!crypto_is_larval(alg) && + ((type ^ alg->cra_flags) & mask)) { + /* Algorithm is disallowed in FIPS mode. 
*/ + crypto_mod_put(alg); + alg = ERR_PTR(-ENOENT); + } + } else if (test) { alg = __crypto_alg_lookup(name, type, mask); if (alg && !crypto_is_larval(alg)) { /* Test failed */ diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 2a808e843de5..2bacf8384f59 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1473,8 +1473,8 @@ static inline int tcrypt_test(const char *alg) pr_debug("testing %s\n", alg); ret = alg_test(alg, alg, 0, 0); - /* non-fips algs return -EINVAL in fips mode */ - if (fips_enabled && ret == -EINVAL) + /* non-fips algs return -EINVAL or -ECANCELED in fips mode */ + if (fips_enabled && (ret == -EINVAL || ret == -ECANCELED)) ret = 0; return ret; } diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 394d1952b2c5..5fabd7bbfba5 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5650,6 +5650,13 @@ static int alg_find_test(const char *alg) return -1; } +static int alg_fips_disabled(const char *driver, const char *alg) +{ + pr_info("alg: %s (%s) is disabled due to FIPS\n", alg, driver); + + return -ECANCELED; +} + int alg_test(const char *driver, const char *alg, u32 type, u32 mask) { int i; @@ -5686,9 +5693,13 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) if (i < 0 && j < 0) goto notest; - if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) || - (j >= 0 && !alg_test_descs[j].fips_allowed))) - goto non_fips_alg; + if (fips_enabled) { + if (j >= 0 && !alg_test_descs[j].fips_allowed) + return -EINVAL; + + if (i >= 0 && !alg_test_descs[i].fips_allowed) + goto non_fips_alg; + } rc = 0; if (i >= 0) @@ -5718,9 +5729,13 @@ test_done: notest: printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver); + + if (type & CRYPTO_ALG_FIPS_INTERNAL) + return alg_fips_disabled(driver, alg); + return 0; non_fips_alg: - return -EINVAL; + return alg_fips_disabled(driver, alg); } #endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 855869e1fd32..2324ab6f1846 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -132,6 +132,15 @@ */ #define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000 +/* + * Mark an algorithm as a service implementation only usable by a + * template and never by a normal user of the kernel crypto API. + * This is intended to be used by algorithms that are themselves + * not FIPS-approved but may instead be used to implement parts of + * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)). + */ +#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000 + /* * Transform masks and values (for crt_flags). */ -- cgit v1.2.3-71-gd317 From 80f940ef527efdd8e36c69f7b5ee8e07ac8891d9 Mon Sep 17 00:00:00 2001 From: Harsha Date: Wed, 23 Feb 2022 16:05:02 +0530 Subject: firmware: xilinx: Add ZynqMP SHA API for SHA3 functionality This patch adds zynqmp_pm_sha_hash API in the ZynqMP firmware to compute SHA3 hash of given data. 
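A minimal usage sketch, based only on the flag semantics in the kerneldoc added below: initialise the engine, feed one buffer, then read back the 48-byte SHA3-384 digest at the same address. Buffer allocation, cache maintenance and the requirement that the buffer be firmware-accessible are assumed and elided.

/* Hedged usage sketch for zynqmp_pm_sha_hash(). 'addr' is assumed to
 * point at contiguous, DMA-able memory the PMU firmware can reach;
 * real callers also need cache maintenance around the calls. */
static int example_sha3_384(u64 addr, u32 len)
{
	int ret;

	ret = zynqmp_pm_sha_hash(0, 0, BIT(0));		/* init engine */
	if (ret)
		return ret;
	ret = zynqmp_pm_sha_hash(addr, len, BIT(1));	/* update with data */
	if (ret)
		return ret;
	return zynqmp_pm_sha_hash(addr, 48, BIT(2));	/* final: 48-byte hash */
}
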
Signed-off-by: Harsha Signed-off-by: Kalyani Akula Acked-by: Michal Simek Signed-off-by: Herbert Xu --- drivers/firmware/xilinx/zynqmp.c | 26 ++++++++++++++++++++++++++ include/linux/firmware/xlnx-zynqmp.h | 8 ++++++++ 2 files changed, 34 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 450c5f6a1cbf..5e5b0bb2e4e0 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -1120,6 +1120,32 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out) } EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine); +/** + * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash + * @address: Address of the data/ Address of output buffer where + * hash should be stored. + * @size: Size of the data. + * @flags: + * BIT(0) - for initializing csudma driver and SHA3(Here address + * and size inputs can be NULL). + * BIT(1) - to call Sha3_Update API which can be called multiple + * times when data is not contiguous. + * BIT(2) - to get final hash of the whole updated data. + * Hash will be overwritten at provided address with + * 48 bytes. + * + * Return: Returns status, either success or error code. + */ +int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags) +{ + u32 lower_addr = lower_32_bits(address); + u32 upper_addr = upper_32_bits(address); + + return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr, + size, flags, NULL); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash); + /** * zynqmp_pm_register_notifier() - PM API for register a subsystem * to be notified about specific diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 907cb01890cf..f6783f58c64a 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -93,6 +93,7 @@ enum pm_api_id { PM_FPGA_LOAD = 22, PM_FPGA_GET_STATUS = 23, PM_GET_CHIPID = 24, + PM_SECURE_SHA = 26, PM_PINCTRL_REQUEST = 28, PM_PINCTRL_RELEASE = 29, PM_PINCTRL_GET_FUNCTION = 30, @@ -427,6 +428,7 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities, const u32 qos, const enum zynqmp_pm_request_ack ack); int zynqmp_pm_aes_engine(const u64 address, u32 *out); +int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags); int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags); int zynqmp_pm_fpga_get_status(u32 *value); int zynqmp_pm_write_ggs(u32 index, u32 value); @@ -601,6 +603,12 @@ static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out) return -ENODEV; } +static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size, + const u32 flags) +{ + return -ENODEV; +} + static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags) { -- cgit v1.2.3-71-gd317 From 4f9a7a1dc2a294c5c5c4b0246e2281e6ec88fb91 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 2 Mar 2022 11:29:14 +0000 Subject: OPP: Add "opp-microwatt" supporting code Add new property to the OPP: power value. The OPP entry in the DT can have "opp-microwatt". Add the needed code to handle this new property in the existing infrastructure. 
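For context, an OPP node would carry one "opp-microwatt" value per supply (e.g. opp-microwatt = <975000>;), and a consumer can then read the aggregate power through the new helper. A hedged consumer sketch, assuming the usual OPP lookup and refcounting API:

/* Illustrative consumer (not part of the patch): print the power of
 * an exact-frequency OPP using the dev_pm_opp_get_power() helper
 * added below. */
static void example_report_power(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return;
	dev_info(dev, "%lu Hz -> %lu uW\n", freq, dev_pm_opp_get_power(opp));
	dev_pm_opp_put(opp);
}
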
Signed-off-by: Lukasz Luba Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 25 +++++++++++++++++++++++++ drivers/opp/debugfs.c | 3 +++ drivers/opp/of.c | 47 +++++++++++++++++++++++++++++++++++++++++++++-- include/linux/pm_opp.h | 12 +++++++++++- 4 files changed, 84 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 3057beabd370..740407252298 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -113,6 +113,31 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) } EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); +/** + * dev_pm_opp_get_power() - Gets the power corresponding to an opp + * @opp: opp for which power has to be returned for + * + * Return: power in micro watt corresponding to the opp, else + * return 0 + * + * This is useful only for devices with single power supply. + */ +unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp) +{ + unsigned long opp_power = 0; + int i; + + if (IS_ERR_OR_NULL(opp)) { + pr_err("%s: Invalid parameters\n", __func__); + return 0; + } + for (i = 0; i < opp->opp_table->regulator_count; i++) + opp_power += opp->supplies[i].u_watt; + + return opp_power; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_power); + /** * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp * @opp: opp for which frequency has to be returned for diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index b5f2f9f39392..3fcc1f97f2d1 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -100,6 +100,9 @@ static void opp_debug_create_supplies(struct dev_pm_opp *opp, debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->supplies[i].u_amp); + + debugfs_create_ulong("u_watt", S_IRUGO, d, + &opp->supplies[i].u_watt); } } diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 2f40afa4e65c..7bff30f27dc1 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -575,8 +575,9 @@ static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, struct opp_table *opp_table) { - u32 *microvolt, *microamp = NULL; - int supplies = opp_table->regulator_count, vcount, icount, ret, i, j; + u32 *microvolt, *microamp = NULL, *microwatt = NULL; + int supplies = opp_table->regulator_count; + int vcount, icount, pcount, ret, i, j; struct property *prop = NULL; char name[NAME_MAX]; @@ -688,6 +689,43 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, } } + /* Search for "opp-microwatt" */ + sprintf(name, "opp-microwatt"); + prop = of_find_property(opp->np, name, NULL); + + if (prop) { + pcount = of_property_count_u32_elems(opp->np, name); + if (pcount < 0) { + dev_err(dev, "%s: Invalid %s property (%d)\n", __func__, + name, pcount); + ret = pcount; + goto free_microamp; + } + + if (pcount != supplies) { + dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n", + __func__, name, pcount, supplies); + ret = -EINVAL; + goto free_microamp; + } + + microwatt = kmalloc_array(pcount, sizeof(*microwatt), + GFP_KERNEL); + if (!microwatt) { + ret = -EINVAL; + goto free_microamp; + } + + ret = of_property_read_u32_array(opp->np, name, microwatt, + pcount); + if (ret) { + dev_err(dev, "%s: error parsing %s: %d\n", __func__, + name, ret); + ret = -EINVAL; + goto free_microwatt; + } + } + for (i = 0, j = 0; i < supplies; i++) { opp->supplies[i].u_volt = microvolt[j++]; @@ -701,8 +739,13 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, if 
(microamp) opp->supplies[i].u_amp = microamp[i]; + + if (microwatt) + opp->supplies[i].u_watt = microwatt[i]; } +free_microwatt: + kfree(microwatt); free_microamp: kfree(microamp); free_microvolt: diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 879c138c7b8e..0d85a63a1f78 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -32,14 +32,17 @@ enum dev_pm_opp_event { * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP * @u_amp: Maximum current drawn by the device in microamperes + * @u_watt: Power used by the device in microwatts * - * This structure stores the voltage/current values for a single power supply. + * This structure stores the voltage/current/power values for a single power + * supply. */ struct dev_pm_opp_supply { unsigned long u_volt; unsigned long u_volt_min; unsigned long u_volt_max; unsigned long u_amp; + unsigned long u_watt; }; /** @@ -94,6 +97,8 @@ void dev_pm_opp_put_opp_table(struct opp_table *opp_table); unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); +unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp); + unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); @@ -186,6 +191,11 @@ static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) return 0; } +static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp) +{ + return 0; +} + static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) { return 0; -- cgit v1.2.3-71-gd317 From caeea9e6671984c3865918459d756b961a24bb49 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 2 Mar 2022 11:29:15 +0000 Subject: PM: EM: add macro to set .active_power() callback conditionally The Energy Model is able to use new power values coming from DT. Add a new macro which is helpful in setting the .active_power() callback conditionally in setup time. The dual-macro implementation handles both kernel configurations: w/ EM and w/o EM built-in. Reported-by: kernel test robot Signed-off-by: Lukasz Luba Signed-off-by: Viresh Kumar --- include/linux/energy_model.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 6377adc3b78d..9f3c400bc52d 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -116,6 +116,7 @@ struct em_data_callback { struct device *dev); }; #define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb } +#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb) struct em_perf_domain *em_cpu_get(int cpu); struct em_perf_domain *em_pd_get(struct device *dev); @@ -264,6 +265,7 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd) #else struct em_data_callback {}; #define EM_DATA_CB(_active_power_cb) { } +#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0) static inline int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, -- cgit v1.2.3-71-gd317 From bf08824a0f4776fc0626b82b6924fa1a5643eacb Mon Sep 17 00:00:00 2001 From: Kurt Kanzenbach Date: Mon, 28 Feb 2022 20:58:56 +0100 Subject: flow_dissector: Add support for HSR Network drivers such as igb or igc call eth_get_headlen() to determine the header length for their to be constructed skbs in receive path. When running HSR on top of these drivers, it results in triggering BUG_ON() in skb_pull(). 
The reason is the skb headlen is not sufficient for HSR to work correctly. skb_pull() notices that. For instance, eth_get_headlen() returns 14 bytes for TCP traffic over HSR which is not correct. The problem is, the flow dissection code does not take HSR into account. Therefore, add support for it. Reported-by: Anthony Harivel Signed-off-by: Kurt Kanzenbach Link: https://lore.kernel.org/r/20220228195856.88187-1-kurt@linutronix.de Signed-off-by: Jakub Kicinski --- include/linux/if_hsr.h | 16 ++++++++++++++++ net/core/flow_dissector.c | 17 +++++++++++++++++ net/hsr/hsr_main.h | 16 ---------------- 3 files changed, 33 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h index 38bbc537d4e4..408539d5ea5f 100644 --- a/include/linux/if_hsr.h +++ b/include/linux/if_hsr.h @@ -9,6 +9,22 @@ enum hsr_version { PRP_V1, }; +/* HSR Tag. + * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB, + * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest, + * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr, + * encapsulated protocol } instead. + * + * Field names as defined in the IEC:2010 standard for HSR. + */ +struct hsr_tag { + __be16 path_and_LSDU_size; + __be16 sequence_nr; + __be16 encap_proto; +} __packed; + +#define HSR_HLEN 6 + #if IS_ENABLED(CONFIG_HSR) extern bool is_hsr_master(struct net_device *dev); extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 15833e1d6ea1..34441a32e3be 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -1282,6 +1283,22 @@ proto_again: break; } + case htons(ETH_P_HSR): { + struct hsr_tag *hdr, _hdr; + + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, + &_hdr); + if (!hdr) { + fdret = FLOW_DISSECT_RET_OUT_BAD; + break; + } + + proto = hdr->encap_proto; + nhoff += HSR_HLEN; + fdret = FLOW_DISSECT_RET_PROTO_AGAIN; + break; + } + default: fdret = FLOW_DISSECT_RET_OUT_BAD; break; diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index ca556bda3467..b158ba409f9a 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -45,22 +45,6 @@ /* PRP V1 life redundancy box MAC address */ #define PRP_TLV_REDBOX_MAC 30 -/* HSR Tag. - * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB, - * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest, - * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr, - * encapsulated protocol } instead. - * - * Field names as defined in the IEC:2010 standard for HSR. - */ -struct hsr_tag { - __be16 path_and_LSDU_size; - __be16 sequence_nr; - __be16 encap_proto; -} __packed; - -#define HSR_HLEN 6 - #define HSR_V1_SUP_LSDUSIZE 52 #define HSR_HSIZE_SHIFT 8 -- cgit v1.2.3-71-gd317 From 9309f97aef6d8250bb484dabeac925c3a7c57716 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 2 Mar 2022 18:31:20 +0200 Subject: net: dev: Add hardware stats support Offloading switch device drivers may be able to collect statistics of the traffic taking place in the HW datapath that pertains to a certain soft netdevice, such as VLAN. Add the necessary infrastructure to allow exposing these statistics to the offloaded netdevice in question. 
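To preview how an offloading driver is expected to participate, here is a hedged sketch of the notifier handshake; the notifier event names and report helpers come from the patch below, while the driver-private counter read is hypothetical.

/* Hypothetical driver notifier: answer REPORT_USED / REPORT_DELTA
 * events for the L3 stats suite using the helpers added below.
 * drv_read_hw_counters() stands in for however a real driver fetches
 * its hardware counters. */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct netdev_notifier_offload_xstats_info *info = ptr;
	struct rtnl_hw_stats64 stats = {};

	if (info->type != NETDEV_OFFLOAD_XSTATS_TYPE_L3)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
		netdev_offload_xstats_report_used(info->report_used);
		break;
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		drv_read_hw_counters(netdev_notifier_info_to_dev(&info->info),
				     &stats);	/* hypothetical */
		netdev_offload_xstats_report_delta(info->report_delta, &stats);
		break;
	}
	return NOTIFY_DONE;
}

The design considerations that shaped this API are described next.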
The API was shaped by the following considerations: - Collection of HW statistics is not free: there may be a finite number of counters, and the act of counting may have a performance impact. It is therefore necessary to allow toggling whether HW counting should be done for any particular SW netdevice. - As the drivers are loaded and removed, a particular device may get offloaded and unoffloaded again. At the same time, the statistics values need to stay monotonic (modulo the eventual 64-bit wraparound), increasing only to reflect traffic measured in the device. To that end, the netdevice keeps around a lazily-allocated copy of struct rtnl_link_stats64. Device drivers then contribute to the values kept therein at various points. Even as the driver goes away, the struct stays around to maintain the statistics values. - Different HW devices may be able to count different things. The motivation behind this patch in particular is exposure of HW counters on Nvidia Spectrum switches, where the only practical approach to counting traffic on offloaded soft netdevices currently is to use router interface counters, and count L3 traffic. Correspondingly that is the statistics suite added in this patch. Other devices may be able to measure different kinds of traffic, and for that reason, the APIs are built to allow uniform access to different statistics suites. - Because soft netdevices and offloading drivers are only loosely bound, a netdevice uses a notifier chain to communicate with the drivers. Several new notifiers, NETDEV_OFFLOAD_XSTATS_*, have been added to carry messages to the offloading drivers. - Devices can have various conditions for when a particular counter is available. As the device is configured and reconfigured, the device offload may become or cease being suitable for counter binding. A netdevice can use a notifier type NETDEV_OFFLOAD_XSTATS_REPORT_USED to ping offloading drivers and determine whether anyone currently implements a given statistics suite. This information can then be propagated to user space. When the driver decides to unoffload a netdevice, it can use a newly-added function, netdev_offload_xstats_report_delta(), to record outstanding collected statistics, before destroying the HW counter. This patch adds a helper, call_netdevice_notifiers_info_robust(), for dispatching a notifier with the possibility of unwind when one of the consumers bails. Given the wish to eventually get rid of the global notifier block altogether, this helper only invokes the per-netns notifier block. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/netdevice.h | 42 +++++++ include/uapi/linux/if_link.h | 15 +++ net/core/dev.c | 267 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 323 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c79ee2296296..19a27ac361ef 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1950,6 +1950,7 @@ enum netdev_ml_priv_type { * @watchdog_dev_tracker: refcount tracker used by watchdog. * @dev_registered_tracker: tracker for reference held while * registered + * @offload_xstats_l3: L3 HW stats for this netdevice. * * FIXME: cleanup struct net_device such that network protocol info * moves out. 
@@ -2287,6 +2288,7 @@ struct net_device { netdevice_tracker linkwatch_dev_tracker; netdevice_tracker watchdog_dev_tracker; netdevice_tracker dev_registered_tracker; + struct rtnl_hw_stats64 *offload_xstats_l3; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2727,6 +2729,10 @@ enum netdev_cmd { NETDEV_CVLAN_FILTER_DROP_INFO, NETDEV_SVLAN_FILTER_PUSH_INFO, NETDEV_SVLAN_FILTER_DROP_INFO, + NETDEV_OFFLOAD_XSTATS_ENABLE, + NETDEV_OFFLOAD_XSTATS_DISABLE, + NETDEV_OFFLOAD_XSTATS_REPORT_USED, + NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, }; const char *netdev_cmd_to_name(enum netdev_cmd cmd); @@ -2777,6 +2783,42 @@ struct netdev_notifier_pre_changeaddr_info { const unsigned char *dev_addr; }; +enum netdev_offload_xstats_type { + NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, +}; + +struct netdev_notifier_offload_xstats_info { + struct netdev_notifier_info info; /* must be first */ + enum netdev_offload_xstats_type type; + + union { + /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ + struct netdev_notifier_offload_xstats_rd *report_delta; + /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ + struct netdev_notifier_offload_xstats_ru *report_used; + }; +}; + +int netdev_offload_xstats_enable(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct netlink_ext_ack *extack); +int netdev_offload_xstats_disable(struct net_device *dev, + enum netdev_offload_xstats_type type); +bool netdev_offload_xstats_enabled(const struct net_device *dev, + enum netdev_offload_xstats_type type); +int netdev_offload_xstats_get(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct rtnl_hw_stats64 *stats, bool *used, + struct netlink_ext_ack *extack); +void +netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, + const struct rtnl_hw_stats64 *stats); +void +netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); +void netdev_offload_xstats_push_delta(struct net_device *dev, + enum netdev_offload_xstats_type type, + const struct rtnl_hw_stats64 *stats); + static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, struct net_device *dev) { diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 4d62ea6e1288..ef6a62a2e15d 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -245,6 +245,21 @@ struct rtnl_link_stats64 { __u64 rx_nohandler; }; +/* Subset of link stats useful for in-HW collection. Meaning of the fields is as + * for struct rtnl_link_stats64. 
+ */ +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; +}; + /* The struct should be in sync with struct ifmap */ struct rtnl_link_ifmap { __u64 mem_start; diff --git a/net/core/dev.c b/net/core/dev.c index 2d6771075720..c9e54e5ad48d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1622,7 +1622,8 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) - N(PRE_CHANGEADDR) + N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE) + N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA) } #undef N return "UNKNOWN_NETDEV_EVENT"; @@ -1939,6 +1940,32 @@ static int call_netdevice_notifiers_info(unsigned long val, return raw_notifier_call_chain(&netdev_chain, val, info); } +/** + * call_netdevice_notifiers_info_robust - call per-netns notifier blocks + * for and rollback on error + * @val_up: value passed unmodified to notifier function + * @val_down: value passed unmodified to the notifier function when + * recovering from an error on @val_up + * @info: notifier information data + * + * Call all per-netns network notifier blocks, but not notifier blocks on + * the global notifier chain. Parameters and return value are as for + * raw_notifier_call_chain_robust(). + */ + +static int +call_netdevice_notifiers_info_robust(unsigned long val_up, + unsigned long val_down, + struct netdev_notifier_info *info) +{ + struct net *net = dev_net(info->dev); + + ASSERT_RTNL(); + + return raw_notifier_call_chain_robust(&net->netdev_chain, + val_up, val_down, info); +} + static int call_netdevice_notifiers_extack(unsigned long val, struct net_device *dev, struct netlink_ext_ack *extack) @@ -7728,6 +7755,242 @@ void netdev_bonding_info_change(struct net_device *dev, } EXPORT_SYMBOL(netdev_bonding_info_change); +static int netdev_offload_xstats_enable_l3(struct net_device *dev, + struct netlink_ext_ack *extack) +{ + struct netdev_notifier_offload_xstats_info info = { + .info.dev = dev, + .info.extack = extack, + .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, + }; + int err; + int rc; + + dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3), + GFP_KERNEL); + if (!dev->offload_xstats_l3) + return -ENOMEM; + + rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE, + NETDEV_OFFLOAD_XSTATS_DISABLE, + &info.info); + err = notifier_to_errno(rc); + if (err) + goto free_stats; + + return 0; + +free_stats: + kfree(dev->offload_xstats_l3); + dev->offload_xstats_l3 = NULL; + return err; +} + +int netdev_offload_xstats_enable(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct netlink_ext_ack *extack) +{ + ASSERT_RTNL(); + + if (netdev_offload_xstats_enabled(dev, type)) + return -EALREADY; + + switch (type) { + case NETDEV_OFFLOAD_XSTATS_TYPE_L3: + return netdev_offload_xstats_enable_l3(dev, extack); + } + + WARN_ON(1); + return -EINVAL; +} +EXPORT_SYMBOL(netdev_offload_xstats_enable); + +static void netdev_offload_xstats_disable_l3(struct net_device *dev) +{ + struct netdev_notifier_offload_xstats_info info = { + .info.dev = dev, + .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, + }; + + call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE, + &info.info); + kfree(dev->offload_xstats_l3); + dev->offload_xstats_l3 = NULL; +} + +int netdev_offload_xstats_disable(struct 
net_device *dev, + enum netdev_offload_xstats_type type) +{ + ASSERT_RTNL(); + + if (!netdev_offload_xstats_enabled(dev, type)) + return -EALREADY; + + switch (type) { + case NETDEV_OFFLOAD_XSTATS_TYPE_L3: + netdev_offload_xstats_disable_l3(dev); + return 0; + } + + WARN_ON(1); + return -EINVAL; +} +EXPORT_SYMBOL(netdev_offload_xstats_disable); + +static void netdev_offload_xstats_disable_all(struct net_device *dev) +{ + netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3); +} + +static struct rtnl_hw_stats64 * +netdev_offload_xstats_get_ptr(const struct net_device *dev, + enum netdev_offload_xstats_type type) +{ + switch (type) { + case NETDEV_OFFLOAD_XSTATS_TYPE_L3: + return dev->offload_xstats_l3; + } + + WARN_ON(1); + return NULL; +} + +bool netdev_offload_xstats_enabled(const struct net_device *dev, + enum netdev_offload_xstats_type type) +{ + ASSERT_RTNL(); + + return netdev_offload_xstats_get_ptr(dev, type); +} +EXPORT_SYMBOL(netdev_offload_xstats_enabled); + +struct netdev_notifier_offload_xstats_ru { + bool used; +}; + +struct netdev_notifier_offload_xstats_rd { + struct rtnl_hw_stats64 stats; + bool used; +}; + +static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest, + const struct rtnl_hw_stats64 *src) +{ + dest->rx_packets += src->rx_packets; + dest->tx_packets += src->tx_packets; + dest->rx_bytes += src->rx_bytes; + dest->tx_bytes += src->tx_bytes; + dest->rx_errors += src->rx_errors; + dest->tx_errors += src->tx_errors; + dest->rx_dropped += src->rx_dropped; + dest->tx_dropped += src->tx_dropped; + dest->multicast += src->multicast; +} + +static int netdev_offload_xstats_get_used(struct net_device *dev, + enum netdev_offload_xstats_type type, + bool *p_used, + struct netlink_ext_ack *extack) +{ + struct netdev_notifier_offload_xstats_ru report_used = {}; + struct netdev_notifier_offload_xstats_info info = { + .info.dev = dev, + .info.extack = extack, + .type = type, + .report_used = &report_used, + }; + int rc; + + WARN_ON(!netdev_offload_xstats_enabled(dev, type)); + rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED, + &info.info); + *p_used = report_used.used; + return notifier_to_errno(rc); +} + +static int netdev_offload_xstats_get_stats(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct rtnl_hw_stats64 *p_stats, + bool *p_used, + struct netlink_ext_ack *extack) +{ + struct netdev_notifier_offload_xstats_rd report_delta = {}; + struct netdev_notifier_offload_xstats_info info = { + .info.dev = dev, + .info.extack = extack, + .type = type, + .report_delta = &report_delta, + }; + struct rtnl_hw_stats64 *stats; + int rc; + + stats = netdev_offload_xstats_get_ptr(dev, type); + if (WARN_ON(!stats)) + return -EINVAL; + + rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, + &info.info); + + /* Cache whatever we got, even if there was an error, otherwise the + * successful stats retrievals would get lost. 
+ */ + netdev_hw_stats64_add(stats, &report_delta.stats); + + if (p_stats) + *p_stats = *stats; + *p_used = report_delta.used; + + return notifier_to_errno(rc); +} + +int netdev_offload_xstats_get(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct rtnl_hw_stats64 *p_stats, bool *p_used, + struct netlink_ext_ack *extack) +{ + ASSERT_RTNL(); + + if (p_stats) + return netdev_offload_xstats_get_stats(dev, type, p_stats, + p_used, extack); + else + return netdev_offload_xstats_get_used(dev, type, p_used, + extack); +} +EXPORT_SYMBOL(netdev_offload_xstats_get); + +void +netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta, + const struct rtnl_hw_stats64 *stats) +{ + report_delta->used = true; + netdev_hw_stats64_add(&report_delta->stats, stats); +} +EXPORT_SYMBOL(netdev_offload_xstats_report_delta); + +void +netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used) +{ + report_used->used = true; +} +EXPORT_SYMBOL(netdev_offload_xstats_report_used); + +void netdev_offload_xstats_push_delta(struct net_device *dev, + enum netdev_offload_xstats_type type, + const struct rtnl_hw_stats64 *p_stats) +{ + struct rtnl_hw_stats64 *stats; + + ASSERT_RTNL(); + + stats = netdev_offload_xstats_get_ptr(dev, type); + if (WARN_ON(!stats)) + return; + + netdev_hw_stats64_add(stats, p_stats); +} +EXPORT_SYMBOL(netdev_offload_xstats_push_delta); + /** * netdev_get_xmit_slave - Get the xmit slave of master device * @dev: device @@ -10417,6 +10680,8 @@ void unregister_netdevice_many(struct list_head *head) dev_xdp_uninstall(dev); + netdev_offload_xstats_disable_all(dev); + /* Notify protocols, that we are about to destroy * this device. They should clean all the things. */ -- cgit v1.2.3-71-gd317 From 5fd0b838efac16046509f7fb100455d0463b9687 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 2 Mar 2022 18:31:23 +0200 Subject: net: rtnetlink: Add UAPI toggle for IFLA_OFFLOAD_XSTATS_L3_STATS The offloaded HW stats are designed to allow per-netdevice enablement and disablement. Add an attribute, IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, which should be carried by the RTM_SETSTATS message, and expresses a desire to toggle L3 offload xstats on or off. As part of the above, add an exported function rtnl_offload_xstats_notify() that drivers can use when they have installed or deinstalled the counters backing the HW stats. At this point, it is possible to enable, disable and query L3 offload xstats on netdevices. (However there is no driver actually implementing these.) Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/linux/rtnetlink.h | 3 ++ include/uapi/linux/if_link.h | 1 + include/uapi/linux/rtnetlink.h | 2 ++ net/core/rtnetlink.c | 75 ++++++++++++++++++++++++++++++++---------- 4 files changed, 64 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index bb9cb84114c1..7f970b16da3a 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -134,4 +134,7 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, int (*vlan_fill)(struct sk_buff *skb, struct net_device *dev, u32 filter_mask)); + +extern void rtnl_offload_xstats_notify(struct net_device *dev); + #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index b1031f481d2f..ddca20357e7e 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -1227,6 +1227,7 @@ enum { IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with * a filter mask for the corresponding group. */ + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */ __IFLA_STATS_GETSET_MAX, }; diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 14462dc159fd..51530aade46e 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -767,6 +767,8 @@ enum rtnetlink_groups { #define RTNLGRP_MCTP_IFADDR RTNLGRP_MCTP_IFADDR RTNLGRP_TUNNEL, #define RTNLGRP_TUNNEL RTNLGRP_TUNNEL + RTNLGRP_STATS, +#define RTNLGRP_STATS RTNLGRP_STATS __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d09354514355..a66b6761b88b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -5566,6 +5566,7 @@ rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { static const struct nla_policy ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { + [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), }; static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, @@ -5773,16 +5774,51 @@ out: return skb->len; } +void rtnl_offload_xstats_notify(struct net_device *dev) +{ + struct rtnl_stats_dump_filters response_filters = {}; + struct net *net = dev_net(dev); + int idxattr = 0, prividx = 0; + struct sk_buff *skb; + int err = -ENOBUFS; + + ASSERT_RTNL(); + + response_filters.mask[0] |= + IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); + response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= + IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); + + skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), + GFP_KERNEL); + if (!skb) + goto errout; + + err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, + &response_filters, &idxattr, &prividx, NULL); + if (err < 0) { + kfree_skb(skb); + goto errout; + } + + rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); + return; + +errout: + rtnl_set_sk_err(net, RTNLGRP_STATS, err); +} +EXPORT_SYMBOL(rtnl_offload_xstats_notify); + static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; struct rtnl_stats_dump_filters response_filters = {}; struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; struct net *net = sock_net(skb->sk); struct net_device *dev = NULL; - int idxattr = 0, prividx = 0; struct if_stats_msg *ifsm; - struct sk_buff *nskb; + bool notify = false; int err; err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), @@ -5814,24 +5850,29 @@ static int 
rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) return err; - nskb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), - GFP_KERNEL); - if (!nskb) - return -ENOBUFS; + if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { + u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); - err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, - NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, - 0, &response_filters, &idxattr, &prividx, - extack); - if (err < 0) { - /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ - WARN_ON(err == -EMSGSIZE); - kfree_skb(nskb); - } else { - err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); + if (req) + err = netdev_offload_xstats_enable(dev, t_l3, extack); + else + err = netdev_offload_xstats_disable(dev, t_l3); + + if (!err) + notify = true; + else if (err != -EALREADY) + return err; + + response_filters.mask[0] |= + IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); + response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= + IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); } - return err; + if (notify) + rtnl_offload_xstats_notify(dev); + + return 0; } /* Process one rtnetlink message. */ -- cgit v1.2.3-71-gd317 From 445ad495f0ff71553693e6a2a9d7a3bc6917ca36 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 24 Feb 2022 16:20:17 +0200 Subject: vfio: Have the core code decode the VFIO_DEVICE_FEATURE ioctl Invoke a new device op 'device_feature' to handle just the data array portion of the command. This lifts the ioctl validation to the core code and makes it simpler for either the core code, or layered drivers, to implement their own feature values. Provide vfio_check_feature() to consolidate checking the flags/etc against what the driver supports. Link: https://lore.kernel.org/all/20220224142024.147653-9-yishaih@nvidia.com Signed-off-by: Jason Gunthorpe Tested-by: Shameer Kolothum Reviewed-by: Alex Williamson Reviewed-by: Cornelia Huck Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/vfio/pci/vfio_pci.c | 1 + drivers/vfio/pci/vfio_pci_core.c | 94 ++++++++++++++++------------------------ drivers/vfio/vfio.c | 46 ++++++++++++++++++-- include/linux/vfio.h | 32 ++++++++++++++ include/linux/vfio_pci_core.h | 2 + 5 files changed, 114 insertions(+), 61 deletions(-) (limited to 'include/linux') diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index a5ce92beb655..2b047469e02f 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -130,6 +130,7 @@ static const struct vfio_device_ops vfio_pci_ops = { .open_device = vfio_pci_open_device, .close_device = vfio_pci_core_close_device, .ioctl = vfio_pci_core_ioctl, + .device_feature = vfio_pci_core_ioctl_feature, .read = vfio_pci_core_read, .write = vfio_pci_core_write, .mmap = vfio_pci_core_mmap, diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index f948e6cd2993..106e1970d653 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1114,70 +1114,50 @@ hot_reset_release: return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count, ioeventfd.fd); - } else if (cmd == VFIO_DEVICE_FEATURE) { - struct vfio_device_feature feature; - uuid_t uuid; - - minsz = offsetofend(struct vfio_device_feature, flags); - - if (copy_from_user(&feature, (void __user *)arg, minsz)) - return -EFAULT; - - if (feature.argsz < minsz) - return -EINVAL; - - /* Check unknown flags */ - if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK | - VFIO_DEVICE_FEATURE_SET | - 
VFIO_DEVICE_FEATURE_GET | - VFIO_DEVICE_FEATURE_PROBE)) - return -EINVAL; - - /* GET & SET are mutually exclusive except with PROBE */ - if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) && - (feature.flags & VFIO_DEVICE_FEATURE_SET) && - (feature.flags & VFIO_DEVICE_FEATURE_GET)) - return -EINVAL; - - switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) { - case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN: - if (!vdev->vf_token) - return -ENOTTY; - - /* - * We do not support GET of the VF Token UUID as this - * could expose the token of the previous device user. - */ - if (feature.flags & VFIO_DEVICE_FEATURE_GET) - return -EINVAL; - - if (feature.flags & VFIO_DEVICE_FEATURE_PROBE) - return 0; + } + return -ENOTTY; +} +EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl); - /* Don't SET unless told to do so */ - if (!(feature.flags & VFIO_DEVICE_FEATURE_SET)) - return -EINVAL; +static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags, + void __user *arg, size_t argsz) +{ + struct vfio_pci_core_device *vdev = + container_of(device, struct vfio_pci_core_device, vdev); + uuid_t uuid; + int ret; - if (feature.argsz < minsz + sizeof(uuid)) - return -EINVAL; + if (!vdev->vf_token) + return -ENOTTY; + /* + * We do not support GET of the VF Token UUID as this could + * expose the token of the previous device user. + */ + ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, + sizeof(uuid)); + if (ret != 1) + return ret; - if (copy_from_user(&uuid, (void __user *)(arg + minsz), - sizeof(uuid))) - return -EFAULT; + if (copy_from_user(&uuid, arg, sizeof(uuid))) + return -EFAULT; - mutex_lock(&vdev->vf_token->lock); - uuid_copy(&vdev->vf_token->uuid, &uuid); - mutex_unlock(&vdev->vf_token->lock); + mutex_lock(&vdev->vf_token->lock); + uuid_copy(&vdev->vf_token->uuid, &uuid); + mutex_unlock(&vdev->vf_token->lock); + return 0; +} - return 0; - default: - return -ENOTTY; - } +int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags, + void __user *arg, size_t argsz) +{ + switch (flags & VFIO_DEVICE_FEATURE_MASK) { + case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN: + return vfio_pci_core_feature_token(device, flags, arg, argsz); + default: + return -ENOTTY; } - - return -ENOTTY; } -EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl); +EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl_feature); static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf, size_t count, loff_t *ppos, bool iswrite) diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 735d1d344af9..71763e2ac561 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1557,15 +1557,53 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep) return 0; } +static int vfio_ioctl_device_feature(struct vfio_device *device, + struct vfio_device_feature __user *arg) +{ + size_t minsz = offsetofend(struct vfio_device_feature, flags); + struct vfio_device_feature feature; + + if (copy_from_user(&feature, arg, minsz)) + return -EFAULT; + + if (feature.argsz < minsz) + return -EINVAL; + + /* Check unknown flags */ + if (feature.flags & + ~(VFIO_DEVICE_FEATURE_MASK | VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_PROBE)) + return -EINVAL; + + /* GET & SET are mutually exclusive except with PROBE */ + if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) && + (feature.flags & VFIO_DEVICE_FEATURE_SET) && + (feature.flags & VFIO_DEVICE_FEATURE_GET)) + return -EINVAL; + + switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) { + default: + if (unlikely(!device->ops->device_feature)) + return -EINVAL; + return 
device->ops->device_feature(device, feature.flags, + arg->data, + feature.argsz - minsz); + } +} + static long vfio_device_fops_unl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct vfio_device *device = filep->private_data; - if (unlikely(!device->ops->ioctl)) - return -EINVAL; - - return device->ops->ioctl(device, cmd, arg); + switch (cmd) { + case VFIO_DEVICE_FEATURE: + return vfio_ioctl_device_feature(device, (void __user *)arg); + default: + if (unlikely(!device->ops->ioctl)) + return -EINVAL; + return device->ops->ioctl(device, cmd, arg); + } } static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf, diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 76191d7abed1..550c28f2ef60 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -55,6 +55,7 @@ struct vfio_device { * @match: Optional device name match callback (return: 0 for no-match, >0 for * match, -errno for abort (ex. match with insufficient or incorrect * additional args) + * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl */ struct vfio_device_ops { char *name; @@ -69,8 +70,39 @@ struct vfio_device_ops { int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma); void (*request)(struct vfio_device *vdev, unsigned int count); int (*match)(struct vfio_device *vdev, char *buf); + int (*device_feature)(struct vfio_device *device, u32 flags, + void __user *arg, size_t argsz); }; +/** + * vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl + * @flags: Arg from the device_feature op + * @argsz: Arg from the device_feature op + * @supported_ops: Combination of VFIO_DEVICE_FEATURE_GET and SET the driver + * supports + * @minsz: Minimum data size the driver accepts + * + * For use in a driver's device_feature op. Checks that the inputs to the + * VFIO_DEVICE_FEATURE ioctl are correct for the driver's feature. Returns 1 if + * the driver should execute the get or set, otherwise the relevant + * value should be returned. 
+ */ +static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops, + size_t minsz) +{ + if ((flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)) & + ~supported_ops) + return -EINVAL; + if (flags & VFIO_DEVICE_FEATURE_PROBE) + return 0; + /* Without PROBE one of GET or SET must be requested */ + if (!(flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET))) + return -EINVAL; + if (argsz < minsz) + return -EINVAL; + return 1; +} + void vfio_init_group_dev(struct vfio_device *device, struct device *dev, const struct vfio_device_ops *ops); void vfio_uninit_group_dev(struct vfio_device *device); diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index ef9a44b6cf5d..beba0b2ed87d 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -220,6 +220,8 @@ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn); extern const struct pci_error_handlers vfio_pci_core_err_handlers; long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, unsigned long arg); +int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags, + void __user *arg, size_t argsz); ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf, size_t count, loff_t *ppos); ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf, -- cgit v1.2.3-71-gd317 From 115dcec65f61d53e25e1bed5e380468b30f98b14 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 24 Feb 2022 16:20:18 +0200 Subject: vfio: Define device migration protocol v2 Replace the existing region based migration protocol with an ioctl based protocol. The two protocols have the same general semantic behaviors, but the way the data is transported is changed. This is the STOP_COPY portion of the new protocol, it defines the 5 states for basic stop and copy migration and the protocol to move the migration data in/out of the kernel. Compared to the clarification of the v1 protocol Alex proposed: https://lore.kernel.org/r/163909282574.728533.7460416142511440919.stgit@omen This has a few deliberate functional differences: - ERROR arcs allow the device function to remain unchanged. - The protocol is not required to return to the original state on transition failure. Instead userspace can execute an unwind back to the original state, reset, or do something else without needing kernel support. This simplifies the kernel design and should userspace choose a policy like always reset, avoids doing useless work in the kernel on error handling paths. - PRE_COPY is made optional, userspace must discover it before using it. This reflects the fact that the majority of drivers we are aware of right now will not implement PRE_COPY. - segmentation is not part of the data stream protocol, the receiver does not have to reproduce the framing boundaries. The hybrid FSM for the device_state is described as a Mealy machine by documenting each of the arcs the driver is required to implement. Defining the remaining set of old/new device_state transitions as 'combination transitions' which are naturally defined as taking multiple FSM arcs along the shortest path within the FSM's digraph allows a complete matrix of transitions. A new VFIO_DEVICE_FEATURE of VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE is defined to replace writing to the device_state field in the region. This allows returning a brand new FD whenever the requested transition opens a data transfer session. 
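As an illustration only (not part of this patch), userspace would drive such a transition roughly as in the sketch below. vfio_set_mig_state() and device_fd are made-up names for an illustrative helper and an already-open VFIO device FD; the structures, flags and data_fd semantics are the uapi additions from this series, so building it assumes headers with the series applied:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Illustrative helper: request one FSM transition and return the
     * data_fd the kernel writes back (-1 unless this arc opened a data
     * transfer session), or -1 on ioctl failure. */
    static int vfio_set_mig_state(int device_fd, uint32_t new_state)
    {
            /* feature header followed by the feature payload in data[] */
            char buf[sizeof(struct vfio_device_feature) +
                     sizeof(struct vfio_device_feature_mig_state)] = { 0 };
            struct vfio_device_feature *feature = (void *)buf;
            struct vfio_device_feature_mig_state *mig =
                    (void *)feature->data;

            feature->argsz = sizeof(buf);
            feature->flags = VFIO_DEVICE_FEATURE_SET |
                             VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
            mig->device_state = new_state;

            if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
                    return -1;

            return mig->data_fd;
    }

For example, requesting VFIO_DEVICE_STATE_STOP_COPY this way yields a data_fd that is then read() to end-of-stream to collect the device state, matching the protocol described below.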
The VFIO core code implements the new feature and provides a helper function to the driver. Using the helper the driver only has to implement 6 of the FSM arcs and the other combination transitions are elaborated consistently from those arcs. A new VFIO_DEVICE_FEATURE of VFIO_DEVICE_FEATURE_MIGRATION is defined to report the capability for migration and indicate which set of states and arcs are supported by the device. The FSM provides a lot of flexibility to make backwards compatible extensions but the VFIO_DEVICE_FEATURE also allows for future breaking extensions for scenarios that cannot support even the basic STOP_COPY requirements. The VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE with the GET option (i.e. VFIO_DEVICE_FEATURE_GET) can be used to read the current migration state of the VFIO device. Data transfer sessions are now carried over a file descriptor, instead of the region. The FD functions for the lifetime of the data transfer session. read() and write() transfer the data with normal Linux stream FD semantics. This design allows future expansion to support poll(), io_uring, and other performance optimizations. The complicated mmap mode for data transfer is discarded as current qemu doesn't take meaningful advantage of it, and the new qemu implementation avoids substantially all the performance penalty of using a read() on the region. Link: https://lore.kernel.org/all/20220224142024.147653-10-yishaih@nvidia.com Signed-off-by: Jason Gunthorpe Tested-by: Shameer Kolothum Reviewed-by: Kevin Tian Reviewed-by: Alex Williamson Reviewed-by: Cornelia Huck Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/vfio/vfio.c | 199 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/vfio.h | 20 +++++ include/uapi/linux/vfio.h | 174 +++++++++++++++++++++++++++++++++++++--- 3 files changed, 380 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 71763e2ac561..b37ab27b511f 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1557,6 +1557,197 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep) return 0; } +/* + * vfio_mig_get_next_state - Compute the next step in the FSM + * @cur_fsm - The current state the device is in + * @new_fsm - The target state to reach + * @next_fsm - Pointer to the next step to get to new_fsm + * + * Return 0 upon success, otherwise -errno + * Upon success the next step in the state progression between cur_fsm and + * new_fsm will be set in next_fsm. + * + * This breaks down requests for combination transitions into smaller steps and + * returns the next step to get to new_fsm. The function may need to be called + * multiple times before reaching new_fsm. 
+ * + */ +int vfio_mig_get_next_state(struct vfio_device *device, + enum vfio_device_mig_state cur_fsm, + enum vfio_device_mig_state new_fsm, + enum vfio_device_mig_state *next_fsm) +{ + enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RESUMING + 1 }; + /* + * The coding in this table requires the driver to implement 6 + * FSM arcs: + * RESUMING -> STOP + * RUNNING -> STOP + * STOP -> RESUMING + * STOP -> RUNNING + * STOP -> STOP_COPY + * STOP_COPY -> STOP + * + * The coding will step through multiple states for these combination + * transitions: + * RESUMING -> STOP -> RUNNING + * RESUMING -> STOP -> STOP_COPY + * RUNNING -> STOP -> RESUMING + * RUNNING -> STOP -> STOP_COPY + * STOP_COPY -> STOP -> RESUMING + * STOP_COPY -> STOP -> RUNNING + */ + static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = { + [VFIO_DEVICE_STATE_STOP] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_RUNNING] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_STOP_COPY] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_RESUMING] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_ERROR] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + }; + + if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table))) + return -EINVAL; + + if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table)) + return -EINVAL; + + *next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm]; + return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 
0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(vfio_mig_get_next_state); + +/* + * Convert the drivers's struct file into a FD number and return it to userspace + */ +static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg, + struct vfio_device_feature_mig_state *mig) +{ + int ret; + int fd; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + ret = fd; + goto out_fput; + } + + mig->data_fd = fd; + if (copy_to_user(arg, mig, sizeof(*mig))) { + ret = -EFAULT; + goto out_put_unused; + } + fd_install(fd, filp); + return 0; + +out_put_unused: + put_unused_fd(fd); +out_fput: + fput(filp); + return ret; +} + +static int +vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + size_t minsz = + offsetofend(struct vfio_device_feature_mig_state, data_fd); + struct vfio_device_feature_mig_state mig; + struct file *filp = NULL; + int ret; + + if (!device->ops->migration_set_state || + !device->ops->migration_get_state) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, + VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_GET, + sizeof(mig)); + if (ret != 1) + return ret; + + if (copy_from_user(&mig, arg, minsz)) + return -EFAULT; + + if (flags & VFIO_DEVICE_FEATURE_GET) { + enum vfio_device_mig_state curr_state; + + ret = device->ops->migration_get_state(device, &curr_state); + if (ret) + return ret; + mig.device_state = curr_state; + goto out_copy; + } + + /* Handle the VFIO_DEVICE_FEATURE_SET */ + filp = device->ops->migration_set_state(device, mig.device_state); + if (IS_ERR(filp) || !filp) + goto out_copy; + + return vfio_ioct_mig_return_fd(filp, arg, &mig); +out_copy: + mig.data_fd = -1; + if (copy_to_user(arg, &mig, sizeof(mig))) + return -EFAULT; + if (IS_ERR(filp)) + return PTR_ERR(filp); + return 0; +} + +static int vfio_ioctl_device_feature_migration(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + struct vfio_device_feature_migration mig = { + .flags = VFIO_MIGRATION_STOP_COPY, + }; + int ret; + + if (!device->ops->migration_set_state || + !device->ops->migration_get_state) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET, + sizeof(mig)); + if (ret != 1) + return ret; + if (copy_to_user(arg, &mig, sizeof(mig))) + return -EFAULT; + return 0; +} + static int vfio_ioctl_device_feature(struct vfio_device *device, struct vfio_device_feature __user *arg) { @@ -1582,6 +1773,14 @@ static int vfio_ioctl_device_feature(struct vfio_device *device, return -EINVAL; switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) { + case VFIO_DEVICE_FEATURE_MIGRATION: + return vfio_ioctl_device_feature_migration( + device, feature.flags, arg->data, + feature.argsz - minsz); + case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE: + return vfio_ioctl_device_feature_mig_device_state( + device, feature.flags, arg->data, + feature.argsz - minsz); default: if (unlikely(!device->ops->device_feature)) return -EINVAL; diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 550c28f2ef60..c44e80bbbd3b 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -56,6 +56,16 @@ struct vfio_device { * match, -errno for abort (ex. match with insufficient or incorrect * additional args) * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl + * @migration_set_state: Optional callback to change the migration state for + * devices that support migration. It's mandatory for + * VFIO_DEVICE_FEATURE_MIGRATION migration support. 
+ * The returned FD is used for data transfer according to the FSM + * definition. The driver is responsible to ensure that FD reaches end + * of stream or error whenever the migration FSM leaves a data transfer + * state or before close_device() returns. + * @migration_get_state: Optional callback to get the migration state for + * devices that support migration. It's mandatory for + * VFIO_DEVICE_FEATURE_MIGRATION migration support. */ struct vfio_device_ops { char *name; @@ -72,6 +82,11 @@ struct vfio_device_ops { int (*match)(struct vfio_device *vdev, char *buf); int (*device_feature)(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz); + struct file *(*migration_set_state)( + struct vfio_device *device, + enum vfio_device_mig_state new_state); + int (*migration_get_state)(struct vfio_device *device, + enum vfio_device_mig_state *curr_state); }; /** @@ -114,6 +129,11 @@ extern void vfio_device_put(struct vfio_device *device); int vfio_assign_device_set(struct vfio_device *device, void *set_id); +int vfio_mig_get_next_state(struct vfio_device *device, + enum vfio_device_mig_state cur_fsm, + enum vfio_device_mig_state new_fsm, + enum vfio_device_mig_state *next_fsm); + /* * External user API */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index ef33ea002b0b..22ed358c04c5 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -605,25 +605,25 @@ struct vfio_region_gfx_edid { struct vfio_device_migration_info { __u32 device_state; /* VFIO device state */ -#define VFIO_DEVICE_STATE_STOP (0) -#define VFIO_DEVICE_STATE_RUNNING (1 << 0) -#define VFIO_DEVICE_STATE_SAVING (1 << 1) -#define VFIO_DEVICE_STATE_RESUMING (1 << 2) -#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_RUNNING | \ - VFIO_DEVICE_STATE_SAVING | \ - VFIO_DEVICE_STATE_RESUMING) +#define VFIO_DEVICE_STATE_V1_STOP (0) +#define VFIO_DEVICE_STATE_V1_RUNNING (1 << 0) +#define VFIO_DEVICE_STATE_V1_SAVING (1 << 1) +#define VFIO_DEVICE_STATE_V1_RESUMING (1 << 2) +#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_V1_RUNNING | \ + VFIO_DEVICE_STATE_V1_SAVING | \ + VFIO_DEVICE_STATE_V1_RESUMING) #define VFIO_DEVICE_STATE_VALID(state) \ - (state & VFIO_DEVICE_STATE_RESUMING ? \ - (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1) + (state & VFIO_DEVICE_STATE_V1_RESUMING ? \ + (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1) #define VFIO_DEVICE_STATE_IS_ERROR(state) \ - ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \ - VFIO_DEVICE_STATE_RESUMING)) + ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \ + VFIO_DEVICE_STATE_V1_RESUMING)) #define VFIO_DEVICE_STATE_SET_ERROR(state) \ - ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_SATE_SAVING | \ - VFIO_DEVICE_STATE_RESUMING) + ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \ + VFIO_DEVICE_STATE_V1_RESUMING) __u32 reserved; __u64 pending_bytes; @@ -1002,6 +1002,154 @@ struct vfio_device_feature { */ #define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0) +/* + * Indicates the device can support the migration API through + * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and + * ERROR states are always supported. Support for additional states is + * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be + * set. + * + * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and + * RESUMING are supported. 
+ */ +struct vfio_device_feature_migration { + __aligned_u64 flags; +#define VFIO_MIGRATION_STOP_COPY (1 << 0) +}; +#define VFIO_DEVICE_FEATURE_MIGRATION 1 + +/* + * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO + * device. The new state is supplied in device_state, see enum + * vfio_device_mig_state for details + * + * The kernel migration driver must fully transition the device to the new state + * value before the operation returns to the user. + * + * The kernel migration driver must not generate asynchronous device state + * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET + * ioctl as described above. + * + * If this function fails then current device_state may be the original + * operating state or some other state along the combination transition path. + * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt + * to return to the original state, or attempt to return to some other state + * such as RUNNING or STOP. + * + * If the new_state starts a new data transfer session then the FD associated + * with that session is returned in data_fd. The user is responsible to close + * this FD when it is finished. The user must consider the migration data stream + * carried over the FD to be opaque and must preserve the byte order of the + * stream. The user is not required to preserve buffer segmentation when writing + * the data stream during the RESUMING operation. + * + * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO + * device, data_fd will be -1. + */ +struct vfio_device_feature_mig_state { + __u32 device_state; /* From enum vfio_device_mig_state */ + __s32 data_fd; +}; +#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2 + +/* + * The device migration Finite State Machine is described by the enum + * vfio_device_mig_state. Some of the FSM arcs will create a migration data + * transfer session by returning a FD, in this case the migration data will + * flow over the FD using read() and write() as discussed below. + * + * There are 5 states to support VFIO_MIGRATION_STOP_COPY: + * RUNNING - The device is running normally + * STOP - The device does not change the internal or external state + * STOP_COPY - The device internal state can be read out + * RESUMING - The device is stopped and is loading a new internal state + * ERROR - The device has failed and must be reset + * + * The FSM takes actions on the arcs between FSM states. The driver implements + * the following behavior for the FSM arcs: + * + * RUNNING -> STOP + * STOP_COPY -> STOP + * While in STOP the device must stop the operation of the device. The device + * must not generate interrupts, DMA, or any other change to external state. + * It must not change its internal state. When stopped the device and kernel + * migration driver must accept and respond to interaction to support external + * subsystems in the STOP state, for example PCI MSI-X and PCI config space. + * Failure by the user to restrict device access while in STOP must not result + * in error conditions outside the user context (ex. host system faults). + * + * The STOP_COPY arc will terminate a data transfer session. + * + * RESUMING -> STOP + * Leaving RESUMING terminates a data transfer session and indicates the + * device should complete processing of the data delivered by write(). 
The + * kernel migration driver should complete the incorporation of data written + * to the data transfer FD into the device internal state and perform + * final validity and consistency checking of the new device state. If the + * user provided data is found to be incomplete, inconsistent, or otherwise + * invalid, the migration driver must fail the SET_STATE ioctl and + * optionally go to the ERROR state as described below. + * + * While in STOP the device has the same behavior as other STOP states + * described above. + * + * To abort a RESUMING session the device must be reset. + * + * STOP -> RUNNING + * While in RUNNING the device is fully operational, the device may generate + * interrupts, DMA, respond to MMIO, all vfio device regions are functional, + * and the device may advance its internal state. + * + * STOP -> STOP_COPY + * This arc begin the process of saving the device state and will return a + * new data_fd. + * + * While in the STOP_COPY state the device has the same behavior as STOP + * with the addition that the data transfers session continues to stream the + * migration state. End of stream on the FD indicates the entire device + * state has been transferred. + * + * The user should take steps to restrict access to vfio device regions while + * the device is in STOP_COPY or risk corruption of the device migration data + * stream. + * + * STOP -> RESUMING + * Entering the RESUMING state starts a process of restoring the device state + * and will return a new data_fd. The data stream fed into the data_fd should + * be taken from the data transfer output of a single FD during saving from + * a compatible device. The migration driver may alter/reset the internal + * device state for this arc if required to prepare the device to receive the + * migration data. + * + * any -> ERROR + * ERROR cannot be specified as a device state, however any transition request + * can be failed with an errno return and may then move the device_state into + * ERROR. In this case the device was unable to execute the requested arc and + * was also unable to restore the device to any valid device_state. + * To recover from ERROR VFIO_DEVICE_RESET must be used to return the + * device_state back to RUNNING. + * + * The remaining possible transitions are interpreted as combinations of the + * above FSM arcs. As there are multiple paths through the FSM arcs the path + * should be selected based on the following rules: + * - Select the shortest path. + * Refer to vfio_mig_get_next_state() for the result of the algorithm. + * + * The automatic transit through the FSM arcs that make up the combination + * transition is invisible to the user. When working with combination arcs the + * user may see any step along the path in the device_state if SET_STATE + * fails. When handling these types of errors users should anticipate future + * revisions of this protocol using new states and those states becoming + * visible in this case. 
+ */ +enum vfio_device_mig_state { + VFIO_DEVICE_STATE_ERROR = 0, + VFIO_DEVICE_STATE_STOP = 1, + VFIO_DEVICE_STATE_RUNNING = 2, + VFIO_DEVICE_STATE_STOP_COPY = 3, + VFIO_DEVICE_STATE_RESUMING = 4, +}; + /* -------- API for Type1 VFIO IOMMU -------- */ /** -- cgit v1.2.3-71-gd317 From 8cb3d83b959be0631cd719b995c40c3cda21cd47 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 24 Feb 2022 16:20:19 +0200 Subject: vfio: Extend the device migration protocol with RUNNING_P2P The RUNNING_P2P state is designed to support multiple devices in the same VM that are doing P2P transactions between themselves. When in RUNNING_P2P the device must be able to accept incoming P2P transactions but should not generate outgoing P2P transactions. As an optional extension to the mandatory states it is defined as in between STOP and RUNNING: STOP -> RUNNING_P2P -> RUNNING -> RUNNING_P2P -> STOP For drivers that are unable to support RUNNING_P2P the core code silently merges RUNNING_P2P and RUNNING together. Unless driver support is present, the new state cannot be used in SET_STATE. Drivers that support this will be required to implement 4 FSM arcs beyond the basic FSM. 2 of the basic FSM arcs become combination transitions. Compared to the v1 clarification, NDMA is redefined into FSM states and is described in terms of the desired P2P quiescent behavior, noting that halting all DMA is an acceptable implementation. Link: https://lore.kernel.org/all/20220224142024.147653-11-yishaih@nvidia.com Signed-off-by: Jason Gunthorpe Tested-by: Shameer Kolothum Reviewed-by: Kevin Tian Reviewed-by: Alex Williamson Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/vfio/vfio.c | 87 +++++++++++++++++++++++++++++++++++++---------- include/linux/vfio.h | 1 + include/uapi/linux/vfio.h | 36 ++++++++++++++++++-- 3 files changed, 104 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index b37ab27b511f..a4555014bd1e 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1577,39 +1577,56 @@ int vfio_mig_get_next_state(struct vfio_device *device, enum vfio_device_mig_state new_fsm, enum vfio_device_mig_state *next_fsm) { - enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RESUMING + 1 }; + enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RUNNING_P2P + 1 }; /* - * The coding in this table requires the driver to implement 6 - * FSM arcs: + * The coding in this table requires the driver to implement the + * following FSM arcs: * RESUMING -> STOP - * RUNNING -> STOP * STOP -> RESUMING - * STOP -> RUNNING * STOP -> STOP_COPY * STOP_COPY -> STOP * - * The coding will step through multiple states for these combination - * transitions: - * RESUMING -> STOP -> RUNNING + * If P2P is supported then the driver must also implement these FSM + * arcs: + * RUNNING -> RUNNING_P2P + * RUNNING_P2P -> RUNNING + * RUNNING_P2P -> STOP + * STOP -> RUNNING_P2P + * Without P2P the driver must implement: + * RUNNING -> STOP + * STOP -> RUNNING + * + * The coding will step through multiple states for some combination + * transitions; if all optional features are supported, this means the + * following ones: + * RESUMING -> STOP -> RUNNING_P2P + * RESUMING -> STOP -> RUNNING_P2P -> RUNNING * RESUMING -> STOP -> STOP_COPY - * RUNNING -> STOP -> RESUMING - * RUNNING -> STOP -> STOP_COPY + * RUNNING -> RUNNING_P2P -> STOP + * RUNNING -> RUNNING_P2P -> STOP -> RESUMING + * RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY + * RUNNING_P2P -> STOP -> RESUMING + * RUNNING_P2P 
-> STOP -> STOP_COPY + * STOP -> RUNNING_P2P -> RUNNING * STOP_COPY -> STOP -> RESUMING - * STOP_COPY -> STOP -> RUNNING + * STOP_COPY -> STOP -> RUNNING_P2P + * STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING */ static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = { [VFIO_DEVICE_STATE_STOP] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, - [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_RUNNING] = { - [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, - [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, - [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_STOP_COPY] = { @@ -1617,6 +1634,7 @@ int vfio_mig_get_next_state(struct vfio_device *device, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_RESUMING] = { @@ -1624,6 +1642,15 @@ int vfio_mig_get_next_state(struct vfio_device *device, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_RUNNING_P2P] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_ERROR] = { @@ -1631,17 +1658,41 @@ int vfio_mig_get_next_state(struct vfio_device *device, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, }; - if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table))) + static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_RUNNING_P2P] = + VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P, + [VFIO_DEVICE_STATE_ERROR] = ~0U, + }; + + if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) || + (state_flags_table[cur_fsm] & device->migration_flags) != + state_flags_table[cur_fsm])) return -EINVAL; 
- if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table)) + if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) || + (state_flags_table[new_fsm] & device->migration_flags) != + state_flags_table[new_fsm]) return -EINVAL; + /* + * Arcs touching optional and unsupported states are skipped over. The + * driver will instead see an arc from the original state to the next + * logical state, as per the above comment. + */ *next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm]; + while ((state_flags_table[*next_fsm] & device->migration_flags) != + state_flags_table[*next_fsm]) + *next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm]; + return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL; } EXPORT_SYMBOL_GPL(vfio_mig_get_next_state); @@ -1731,7 +1782,7 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device, size_t argsz) { struct vfio_device_feature_migration mig = { - .flags = VFIO_MIGRATION_STOP_COPY, + .flags = device->migration_flags, }; int ret; diff --git a/include/linux/vfio.h b/include/linux/vfio.h index c44e80bbbd3b..66dda06ec42d 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -33,6 +33,7 @@ struct vfio_device { struct vfio_group *group; struct vfio_device_set *dev_set; struct list_head dev_set_list; + unsigned int migration_flags; /* Members below here are private, not for driver use */ refcount_t refcount; diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 22ed358c04c5..26a66f68371d 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -1011,10 +1011,16 @@ struct vfio_device_feature { * * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and * RESUMING are supported. + * + * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P + * is supported in addition to the STOP_COPY states. + * + * Other combinations of flags have behavior to be defined in the future. */ struct vfio_device_feature_migration { __aligned_u64 flags; #define VFIO_MIGRATION_STOP_COPY (1 << 0) +#define VFIO_MIGRATION_P2P (1 << 1) }; #define VFIO_DEVICE_FEATURE_MIGRATION 1 @@ -1065,10 +1071,13 @@ struct vfio_device_feature_mig_state { * RESUMING - The device is stopped and is loading a new internal state * ERROR - The device has failed and must be reset * + * And 1 optional state to support VFIO_MIGRATION_P2P: + * RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA + * * The FSM takes actions on the arcs between FSM states. The driver implements * the following behavior for the FSM arcs: * - * RUNNING -> STOP + * RUNNING_P2P -> STOP * STOP_COPY -> STOP * While in STOP the device must stop the operation of the device. The device * must not generate interrupts, DMA, or any other change to external state. @@ -1095,11 +1104,16 @@ struct vfio_device_feature_mig_state { * * To abort a RESUMING session the device must be reset. * - * STOP -> RUNNING + * RUNNING_P2P -> RUNNING * While in RUNNING the device is fully operational, the device may generate * interrupts, DMA, respond to MMIO, all vfio device regions are functional, * and the device may advance its internal state. * + * RUNNING -> RUNNING_P2P + * STOP -> RUNNING_P2P + * While in RUNNING_P2P the device is partially running in the P2P quiescent + * state defined below. + * * STOP -> STOP_COPY * This arc begin the process of saving the device state and will return a * new data_fd. @@ -1129,6 +1143,18 @@ struct vfio_device_feature_mig_state { * To recover from ERROR VFIO_DEVICE_RESET must be used to return the * device_state back to RUNNING. 
* + * The optional peer to peer (P2P) quiescent state is intended to be a quiescent + * state for the device for the purposes of managing multiple devices within a + * user context where peer-to-peer DMA between devices may be active. The + * RUNNING_P2P states must prevent the device from initiating + * any new P2P DMA transactions. If the device can identify P2P transactions + * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration + * driver must complete any such outstanding operations prior to completing the + * FSM arc into a P2P state. For the purpose of specification the states + * behave as though the device was fully running if not supported. Like while in + * STOP or STOP_COPY the user must not touch the device, otherwise the state + * can be exited. + * * The remaining possible transitions are interpreted as combinations of the * above FSM arcs. As there are multiple paths through the FSM arcs the path * should be selected based on the following rules: @@ -1141,6 +1167,11 @@ struct vfio_device_feature_mig_state { * fails. When handling these types of errors users should anticipate future * revisions of this protocol using new states and those states becoming * visible in this case. + * + * The optional states cannot be used with SET_STATE if the device does not + * support them. The user can discover if these states are supported by using + * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can + * avoid knowing about these optional states if the kernel driver supports them. */ enum vfio_device_mig_state { VFIO_DEVICE_STATE_ERROR = 0, @@ -1148,6 +1179,7 @@ enum vfio_device_mig_state { VFIO_DEVICE_STATE_RUNNING = 2, VFIO_DEVICE_STATE_STOP_COPY = 3, VFIO_DEVICE_STATE_RESUMING = 4, + VFIO_DEVICE_STATE_RUNNING_P2P = 5, }; /* -------- API for Type1 VFIO IOMMU -------- */ -- cgit v1.2.3-71-gd317 From 915076f70efad04bf6fc843970a3c7763487011a Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Thu, 24 Feb 2022 16:20:23 +0200 Subject: vfio/pci: Expose vfio_pci_core_aer_err_detected() Expose vfio_pci_core_aer_err_detected() to be used by drivers as part of their pci_error_handlers structure. Next patch for mlx5 driver will use it. 
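As a usage sketch (not part of this patch; the real hookup for mlx5 follows in the next patch, and my_vfio_pci_err_handlers is a made-up name), a variant driver would reuse the newly exported helper along these lines:

    /* Wire the exported core AER handler, declared in
     * <linux/vfio_pci_core.h>, into the driver's own error handlers. */
    static const struct pci_error_handlers my_vfio_pci_err_handlers = {
            .error_detected = vfio_pci_core_aer_err_detected,
    };

This keeps the core error_detected behaviour while letting the driver supply additional handlers of its own.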
Link: https://lore.kernel.org/all/20220224142024.147653-15-yishaih@nvidia.com Reviewed-by: Alex Williamson Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/vfio/pci/vfio_pci_core.c | 7 ++++--- include/linux/vfio_pci_core.h | 2 ++ 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index 106e1970d653..e301092e94ef 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1871,8 +1871,8 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev) } EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device); -static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev, - pci_channel_state_t state) +pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) { struct vfio_pci_core_device *vdev; struct vfio_device *device; @@ -1894,6 +1894,7 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_CAN_RECOVER; } +EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected); int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn) { @@ -1916,7 +1917,7 @@ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn) EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure); const struct pci_error_handlers vfio_pci_core_err_handlers = { - .error_detected = vfio_pci_aer_err_detected, + .error_detected = vfio_pci_core_aer_err_detected, }; EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers); diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index beba0b2ed87d..9f1bf8e49d43 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -232,6 +232,8 @@ int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf); int vfio_pci_core_enable(struct vfio_pci_core_device *vdev); void vfio_pci_core_disable(struct vfio_pci_core_device *vdev); void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev); +pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, + pci_channel_state_t state); static inline bool vfio_pci_is_vga(struct pci_dev *pdev) { -- cgit v1.2.3-71-gd317 From 3f8bab174cb26aa5a8053c4457cc733881e3ad88 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Thu, 3 Mar 2022 09:08:31 +0100 Subject: serial: make uart_console_write->putchar()'s character an unsigned char MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, uart_console_write->putchar's second parameter (the character) is of type int. It makes little sense, provided uart_console_write() accepts the input string as "const char *s" and passes its content -- the characters -- to putchar(). So switch the character's type to unsigned char. We don't use char as that is signed on some platforms. That would cause troubles for drivers which (implicitly) cast the char to u16 when writing to the device. Sign extension would happen in that case and the value written would be completely different to the provided char. DZ is an example of such a driver -- on MIPS, it uses u16 for dz_out in dz_console_putchar(). Note we do the char -> uchar conversion implicitly in uart_console_write(). Provided we do not change size of the data type, sign extension does not happen there, so the problem is void. This makes the types consistent and unified with the rest of the uart layer, which uses unsigned char in most places already. One exception is xmit_buf, but that is going to be converted later. 
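The sign-extension hazard described above is easy to reproduce in a standalone sketch (illustrative only; uint16_t stands in for the driver-side u16, and the effect appears only on platforms where plain char is signed, e.g. x86 and MIPS):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            char sc = (char)0x80;        /* plain char, signed here */
            unsigned char uc = 0x80;

            uint16_t wide_signed = sc;   /* sign-extended to 0xff80 */
            uint16_t wide_unsigned = uc; /* zero-extended to 0x0080 */

            printf("%#06x %#06x\n", (unsigned)wide_signed,
                   (unsigned)wide_unsigned);
            return 0;
    }

With the old int/char plumbing, a driver writing the character into a u16 register image could hit the first case; taking unsigned char throughout makes the second, intended behaviour the only possibility.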
Cc: Paul Cercueil Cc: Tobias Klauser Cc: Russell King Cc: Vineet Gupta Cc: Nicolas Ferre Cc: Alexandre Belloni Cc: Ludovic Desroches Cc: Florian Fainelli Cc: bcm-kernel-feedback-list@broadcom.com Cc: Alexander Shiyan Cc: Baruch Siach Cc: "Maciej W. Rozycki" Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Albert Ou Cc: Shawn Guo Cc: Sascha Hauer Cc: Pengutronix Kernel Team Cc: Fabio Estevam Cc: NXP Linux Team Cc: Karol Gugala Cc: Mateusz Holenko Cc: Vladimir Zapolskiy Cc: Neil Armstrong Cc: Kevin Hilman Cc: Jerome Brunet Cc: Martin Blumenstingl Cc: Taichi Sugaya Cc: Takao Orito Cc: Liviu Dudau Cc: Sudeep Holla Cc: Lorenzo Pieralisi Cc: "Andreas Färber" Cc: Manivannan Sadhasivam Cc: Michael Ellerman Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Andy Gross Cc: Bjorn Andersson Cc: Krzysztof Kozlowski Cc: Orson Zhai Cc: Baolin Wang Cc: Chunyan Zhang Cc: Patrice Chotard Cc: Maxime Coquelin Cc: Alexandre Torgue Cc: "David S. Miller" Cc: Peter Korsgaard Cc: Michal Simek Acked-by: Richard Genoud [atmel_serial] Acked-by: Uwe Kleine-König Acked-by: Paul Cercueil Acked-by: Neil Armstrong # meson_serial Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20220303080831.21783-1-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/goldfish.c | 2 +- drivers/tty/hvc/hvc_dcc.c | 2 +- drivers/tty/serial/21285.c | 2 +- drivers/tty/serial/8250/8250_early.c | 2 +- drivers/tty/serial/8250/8250_ingenic.c | 2 +- drivers/tty/serial/8250/8250_port.c | 2 +- drivers/tty/serial/altera_jtaguart.c | 4 ++-- drivers/tty/serial/altera_uart.c | 2 +- drivers/tty/serial/amba-pl010.c | 2 +- drivers/tty/serial/amba-pl011.c | 6 +++--- drivers/tty/serial/apbuart.c | 2 +- drivers/tty/serial/ar933x_uart.c | 2 +- drivers/tty/serial/arc_uart.c | 2 +- drivers/tty/serial/atmel_serial.c | 2 +- drivers/tty/serial/bcm63xx_uart.c | 2 +- drivers/tty/serial/clps711x.c | 2 +- drivers/tty/serial/digicolor-usart.c | 2 +- drivers/tty/serial/dz.c | 2 +- drivers/tty/serial/earlycon-arm-semihost.c | 2 +- drivers/tty/serial/earlycon-riscv-sbi.c | 2 +- drivers/tty/serial/fsl_linflexuart.c | 4 ++-- drivers/tty/serial/fsl_lpuart.c | 4 ++-- drivers/tty/serial/imx.c | 2 +- drivers/tty/serial/imx_earlycon.c | 2 +- drivers/tty/serial/ip22zilog.c | 2 +- drivers/tty/serial/lantiq.c | 2 +- drivers/tty/serial/liteuart.c | 2 +- drivers/tty/serial/lpc32xx_hs.c | 2 +- drivers/tty/serial/meson_uart.c | 2 +- drivers/tty/serial/milbeaut_usio.c | 2 +- drivers/tty/serial/mps2-uart.c | 4 ++-- drivers/tty/serial/mvebu-uart.c | 4 ++-- drivers/tty/serial/mxs-auart.c | 2 +- drivers/tty/serial/omap-serial.c | 4 ++-- drivers/tty/serial/owl-uart.c | 2 +- drivers/tty/serial/pch_uart.c | 2 +- drivers/tty/serial/pic32_uart.c | 2 +- drivers/tty/serial/pmac_zilog.c | 2 +- drivers/tty/serial/pxa.c | 2 +- drivers/tty/serial/qcom_geni_serial.c | 2 +- drivers/tty/serial/rda-uart.c | 2 +- drivers/tty/serial/sa1100.c | 2 +- drivers/tty/serial/samsung_tty.c | 4 ++-- drivers/tty/serial/sb1250-duart.c | 2 +- drivers/tty/serial/sccnxp.c | 2 +- drivers/tty/serial/serial_core.c | 2 +- drivers/tty/serial/serial_txx9.c | 2 +- drivers/tty/serial/sh-sci.c | 2 +- drivers/tty/serial/sifive.c | 4 ++-- drivers/tty/serial/sprd_serial.c | 4 ++-- drivers/tty/serial/st-asc.c | 2 +- drivers/tty/serial/stm32-usart.c | 2 +- drivers/tty/serial/sunplus-uart.c | 5 +++-- drivers/tty/serial/sunsab.c | 2 +- drivers/tty/serial/sunsu.c | 2 +- drivers/tty/serial/sunzilog.c | 4 ++-- drivers/tty/serial/uartlite.c | 4 ++-- drivers/tty/serial/vr41xx_siu.c | 2 +- drivers/tty/serial/vt8500_serial.c | 2 +- 
drivers/tty/serial/xilinx_uartps.c | 2 +- drivers/tty/serial/zs.c | 2 +- include/linux/serial_core.h | 2 +- 62 files changed, 77 insertions(+), 76 deletions(-) (limited to 'include/linux') diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index 5ed19a9857ad..ad13532e92fe 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -434,7 +434,7 @@ static int goldfish_tty_remove(struct platform_device *pdev) } #ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE -static void gf_early_console_putchar(struct uart_port *port, int ch) +static void gf_early_console_putchar(struct uart_port *port, unsigned char ch) { __raw_writel(ch, port->membase); } diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c index 8e0edb7d93fd..bd61f9372d83 100644 --- a/drivers/tty/hvc/hvc_dcc.c +++ b/drivers/tty/hvc/hvc_dcc.c @@ -15,7 +15,7 @@ #define DCC_STATUS_RX (1 << 30) #define DCC_STATUS_TX (1 << 29) -static void dcc_uart_console_putchar(struct uart_port *port, int ch) +static void dcc_uart_console_putchar(struct uart_port *port, unsigned char ch) { while (__dcc_getstatus() & DCC_STATUS_TX) cpu_relax(); diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c index 09baef4ccc39..7520cc02fd4d 100644 --- a/drivers/tty/serial/21285.c +++ b/drivers/tty/serial/21285.c @@ -403,7 +403,7 @@ static void serial21285_setup_ports(void) } #ifdef CONFIG_SERIAL_21285_CONSOLE -static void serial21285_console_putchar(struct uart_port *port, int ch) +static void serial21285_console_putchar(struct uart_port *port, unsigned char ch) { while (*CSR_UARTFLG & 0x20) barrier(); diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c index c171ce6db691..e52585064565 100644 --- a/drivers/tty/serial/8250/8250_early.c +++ b/drivers/tty/serial/8250/8250_early.c @@ -86,7 +86,7 @@ static void serial8250_early_out(struct uart_port *port, int offset, int value) #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) -static void serial_putc(struct uart_port *port, int c) +static void serial_putc(struct uart_port *port, unsigned char c) { unsigned int status; diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c index 65402d05eff9..cff91aa03f29 100644 --- a/drivers/tty/serial/8250/8250_ingenic.c +++ b/drivers/tty/serial/8250/8250_ingenic.c @@ -52,7 +52,7 @@ static void early_out(struct uart_port *port, int offset, uint8_t value) writel(value, port->membase + (offset << 2)); } -static void ingenic_early_console_putc(struct uart_port *port, int c) +static void ingenic_early_console_putc(struct uart_port *port, unsigned char c) { uint8_t lsr; diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 25b84d30e771..267026892264 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -3305,7 +3305,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults); #ifdef CONFIG_SERIAL_8250_CONSOLE -static void serial8250_console_putchar(struct uart_port *port, int ch) +static void serial8250_console_putchar(struct uart_port *port, unsigned char ch) { struct uart_8250_port *up = up_to_u8250p(port); diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c index 37bffe406b18..1c16345d0a1f 100644 --- a/drivers/tty/serial/altera_jtaguart.c +++ b/drivers/tty/serial/altera_jtaguart.c @@ -298,7 +298,7 @@ static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS]; #if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE) #if 
defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS) -static void altera_jtaguart_console_putc(struct uart_port *port, int c) +static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c) { unsigned long status; unsigned long flags; @@ -318,7 +318,7 @@ static void altera_jtaguart_console_putc(struct uart_port *port, int c) spin_unlock_irqrestore(&port->lock, flags); } #else -static void altera_jtaguart_console_putc(struct uart_port *port, int c) +static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c) { unsigned long flags; diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 64a352b40197..8b749ed557c6 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -438,7 +438,7 @@ static struct altera_uart altera_uart_ports[CONFIG_SERIAL_ALTERA_UART_MAXPORTS]; #if defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE) -static void altera_uart_console_putc(struct uart_port *port, int c) +static void altera_uart_console_putc(struct uart_port *port, unsigned char c) { while (!(altera_uart_readl(port, ALTERA_UART_STATUS_REG) & ALTERA_UART_STATUS_TRDY_MSK)) diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c index b73375214ac3..fae0b581ff42 100644 --- a/drivers/tty/serial/amba-pl010.c +++ b/drivers/tty/serial/amba-pl010.c @@ -551,7 +551,7 @@ static struct uart_amba_port *amba_ports[UART_NR]; #ifdef CONFIG_SERIAL_AMBA_PL010_CONSOLE -static void pl010_console_putchar(struct uart_port *port, int ch) +static void pl010_console_putchar(struct uart_port *port, unsigned char ch) { unsigned int status; diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index ba053a68529f..51ecb050ae40 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2255,7 +2255,7 @@ static struct uart_amba_port *amba_ports[UART_NR]; #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE -static void pl011_console_putchar(struct uart_port *port, int ch) +static void pl011_console_putchar(struct uart_port *port, unsigned char ch) { struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); @@ -2471,7 +2471,7 @@ static struct console amba_console = { #define AMBA_CONSOLE (&amba_console) -static void qdf2400_e44_putc(struct uart_port *port, int c) +static void qdf2400_e44_putc(struct uart_port *port, unsigned char c) { while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) cpu_relax(); @@ -2487,7 +2487,7 @@ static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned uart_console_write(&dev->port, s, n, qdf2400_e44_putc); } -static void pl011_putc(struct uart_port *port, int c) +static void pl011_putc(struct uart_port *port, unsigned char c) { while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) cpu_relax(); diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c index d8c937bdf3f9..9ef82d870ff2 100644 --- a/drivers/tty/serial/apbuart.c +++ b/drivers/tty/serial/apbuart.c @@ -413,7 +413,7 @@ static void apbuart_flush_fifo(struct uart_port *port) #ifdef CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE -static void apbuart_console_putchar(struct uart_port *port, int ch) +static void apbuart_console_putchar(struct uart_port *port, unsigned char ch) { unsigned int status; do { diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index 8cabe50c4a33..6269dbf93546 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c @@ -613,7 +613,7 @@ static void 
ar933x_uart_wait_xmitr(struct ar933x_uart_port *up) } while ((status & AR933X_UART_DATA_TX_CSR) == 0); } -static void ar933x_uart_console_putchar(struct uart_port *port, int ch) +static void ar933x_uart_console_putchar(struct uart_port *port, unsigned char ch) { struct ar933x_uart_port *up = container_of(port, struct ar933x_uart_port, port); diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c index 596217d10d5c..2a09e92ef9ed 100644 --- a/drivers/tty/serial/arc_uart.c +++ b/drivers/tty/serial/arc_uart.c @@ -508,7 +508,7 @@ static int arc_serial_console_setup(struct console *co, char *options) return uart_set_options(port, co, baud, parity, bits, flow); } -static void arc_serial_console_putchar(struct uart_port *port, int ch) +static void arc_serial_console_putchar(struct uart_port *port, unsigned char ch) { while (!(UART_GET_STATUS(port) & TXEMPTY)) cpu_relax(); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 73d43919898d..3a45e4fc7993 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -2541,7 +2541,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port, } #ifdef CONFIG_SERIAL_ATMEL_CONSOLE -static void atmel_console_putchar(struct uart_port *port, int ch) +static void atmel_console_putchar(struct uart_port *port, unsigned char ch) { while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) cpu_relax(); diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 6471a54b616b..53b43174aa40 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -681,7 +681,7 @@ static void wait_for_xmitr(struct uart_port *port) /* * output given char */ -static void bcm_console_putchar(struct uart_port *port, int ch) +static void bcm_console_putchar(struct uart_port *port, unsigned char ch) { wait_for_xmitr(port); bcm_uart_writel(port, ch, UART_FIFO_REG); diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c index 95abc6faa3d5..b9b66ad31a08 100644 --- a/drivers/tty/serial/clps711x.c +++ b/drivers/tty/serial/clps711x.c @@ -348,7 +348,7 @@ static const struct uart_ops uart_clps711x_ops = { }; #ifdef CONFIG_SERIAL_CLPS711X_CONSOLE -static void uart_clps711x_console_putchar(struct uart_port *port, int ch) +static void uart_clps711x_console_putchar(struct uart_port *port, unsigned char ch) { struct clps711x_port *s = dev_get_drvdata(port->dev); u32 sysflg = 0; diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c index 13ac36e2da4f..6d70fea76bb3 100644 --- a/drivers/tty/serial/digicolor-usart.c +++ b/drivers/tty/serial/digicolor-usart.c @@ -381,7 +381,7 @@ static const struct uart_ops digicolor_uart_ops = { .request_port = digicolor_uart_request_port, }; -static void digicolor_uart_console_putchar(struct uart_port *port, int ch) +static void digicolor_uart_console_putchar(struct uart_port *port, unsigned char ch) { while (digicolor_uart_tx_full(port)) cpu_relax(); diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c index e9edabc5a211..2e21acf39720 100644 --- a/drivers/tty/serial/dz.c +++ b/drivers/tty/serial/dz.c @@ -802,7 +802,7 @@ static void __init dz_init_ports(void) * restored. Welcome to the world of PDP-11! 
* ------------------------------------------------------------------- */ -static void dz_console_putchar(struct uart_port *uport, int ch) +static void dz_console_putchar(struct uart_port *uport, unsigned char ch) { struct dz_port *dport = to_dport(uport); unsigned long flags; diff --git a/drivers/tty/serial/earlycon-arm-semihost.c b/drivers/tty/serial/earlycon-arm-semihost.c index fa096c10b591..fcdec5f42376 100644 --- a/drivers/tty/serial/earlycon-arm-semihost.c +++ b/drivers/tty/serial/earlycon-arm-semihost.c @@ -21,7 +21,7 @@ /* * Semihosting-based debug console */ -static void smh_putc(struct uart_port *port, int c) +static void smh_putc(struct uart_port *port, unsigned char c) { #ifdef CONFIG_ARM64 asm volatile("mov x1, %0\n" diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c index ce81523c3113..27afb0b74ea7 100644 --- a/drivers/tty/serial/earlycon-riscv-sbi.c +++ b/drivers/tty/serial/earlycon-riscv-sbi.c @@ -10,7 +10,7 @@ #include #include -static void sbi_putc(struct uart_port *port, int c) +static void sbi_putc(struct uart_port *port, unsigned char c) { sbi_console_putchar(c); } diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c index e72cba085743..98bb0c315e13 100644 --- a/drivers/tty/serial/fsl_linflexuart.c +++ b/drivers/tty/serial/fsl_linflexuart.c @@ -553,7 +553,7 @@ static const struct uart_ops linflex_pops = { static struct uart_port *linflex_ports[UART_NR]; #ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE -static void linflex_console_putchar(struct uart_port *port, int ch) +static void linflex_console_putchar(struct uart_port *port, unsigned char ch) { unsigned long cr; @@ -578,7 +578,7 @@ static void linflex_console_putchar(struct uart_port *port, int ch) } } -static void linflex_earlycon_putchar(struct uart_port *port, int ch) +static void linflex_earlycon_putchar(struct uart_port *port, unsigned char ch) { unsigned long flags; char *ret; diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 7d90c5a530ee..87789872f400 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -2333,13 +2333,13 @@ static const struct uart_ops lpuart32_pops = { static struct lpuart_port *lpuart_ports[UART_NR]; #ifdef CONFIG_SERIAL_FSL_LPUART_CONSOLE -static void lpuart_console_putchar(struct uart_port *port, int ch) +static void lpuart_console_putchar(struct uart_port *port, unsigned char ch) { lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TDRE); writeb(ch, port->membase + UARTDR); } -static void lpuart32_console_putchar(struct uart_port *port, int ch) +static void lpuart32_console_putchar(struct uart_port *port, unsigned char ch) { lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE); lpuart32_write(port, ch, UARTDATA); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 0b467ce8d737..fd38e6ed4fda 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1968,7 +1968,7 @@ static const struct uart_ops imx_uart_pops = { static struct imx_port *imx_uart_ports[UART_NR]; #if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE) -static void imx_uart_console_putchar(struct uart_port *port, int ch) +static void imx_uart_console_putchar(struct uart_port *port, unsigned char ch) { struct imx_port *sport = (struct imx_port *)port; diff --git a/drivers/tty/serial/imx_earlycon.c b/drivers/tty/serial/imx_earlycon.c index 795606e1a22f..7aab38b2bd8c 100644 --- a/drivers/tty/serial/imx_earlycon.c +++ b/drivers/tty/serial/imx_earlycon.c @@ -16,7 +16,7 @@ 
#define UTS_TXFULL (1<<4) /* TxFIFO full */ #define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/ -static void imx_uart_console_early_putchar(struct uart_port *port, int ch) +static void imx_uart_console_early_putchar(struct uart_port *port, unsigned char ch) { while (readl_relaxed(port->membase + IMX21_UTS) & UTS_TXFULL) cpu_relax(); diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c index f4dc5fe4ba92..655e64b26852 100644 --- a/drivers/tty/serial/ip22zilog.c +++ b/drivers/tty/serial/ip22zilog.c @@ -990,7 +990,7 @@ static struct zilog_layout * __init get_zs(int chip) #define ZS_PUT_CHAR_MAX_DELAY 2000 /* 10 ms */ #ifdef CONFIG_SERIAL_IP22_ZILOG_CONSOLE -static void ip22zilog_put_char(struct uart_port *port, int ch) +static void ip22zilog_put_char(struct uart_port *port, unsigned char ch) { struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port); int loops = ZS_PUT_CHAR_MAX_DELAY; diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index 3e324d3f0a6d..a3120c3347dd 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -598,7 +598,7 @@ static const struct uart_ops lqasc_pops = { #ifdef CONFIG_SERIAL_LANTIQ_CONSOLE static void -lqasc_console_putchar(struct uart_port *port, int ch) +lqasc_console_putchar(struct uart_port *port, unsigned char ch) { int fifofree; diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c index 7f74bf7bdcff..328b50521f14 100644 --- a/drivers/tty/serial/liteuart.c +++ b/drivers/tty/serial/liteuart.c @@ -93,7 +93,7 @@ static void liteuart_timer(struct timer_list *t) mod_timer(&uart->timer, jiffies + uart_poll_timeout(port)); } -static void liteuart_putchar(struct uart_port *port, int ch) +static void liteuart_putchar(struct uart_port *port, unsigned char ch) { while (litex_read8(port->membase + OFF_TXFULL)) cpu_relax(); diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c index 54437a087aa0..93140cac1ca1 100644 --- a/drivers/tty/serial/lpc32xx_hs.c +++ b/drivers/tty/serial/lpc32xx_hs.c @@ -122,7 +122,7 @@ static void wait_for_xmit_ready(struct uart_port *port) } } -static void lpc32xx_hsuart_console_putchar(struct uart_port *port, int ch) +static void lpc32xx_hsuart_console_putchar(struct uart_port *port, unsigned char ch) { wait_for_xmit_ready(port); writel((u32)ch, LPC32XX_HSUART_FIFO(port->membase)); diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c index 45e00d928253..2bf1c57e0981 100644 --- a/drivers/tty/serial/meson_uart.c +++ b/drivers/tty/serial/meson_uart.c @@ -513,7 +513,7 @@ static void meson_uart_enable_tx_engine(struct uart_port *port) writel(val, port->membase + AML_UART_CONTROL); } -static void meson_console_putchar(struct uart_port *port, int ch) +static void meson_console_putchar(struct uart_port *port, unsigned char ch) { if (!port->membase) return; diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c index 8f2cab7f66ad..347088bb380e 100644 --- a/drivers/tty/serial/milbeaut_usio.c +++ b/drivers/tty/serial/milbeaut_usio.c @@ -400,7 +400,7 @@ static const struct uart_ops mlb_usio_ops = { #ifdef CONFIG_SERIAL_MILBEAUT_USIO_CONSOLE -static void mlb_usio_console_putchar(struct uart_port *port, int c) +static void mlb_usio_console_putchar(struct uart_port *port, unsigned char c) { while (!(readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TDRE)) cpu_relax(); diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c index 
587b42f754cb..5e9429dcc51f 100644 --- a/drivers/tty/serial/mps2-uart.c +++ b/drivers/tty/serial/mps2-uart.c @@ -432,7 +432,7 @@ static const struct uart_ops mps2_uart_pops = { static DEFINE_IDR(ports_idr); #ifdef CONFIG_SERIAL_MPS2_UART_CONSOLE -static void mps2_uart_console_putchar(struct uart_port *port, int ch) +static void mps2_uart_console_putchar(struct uart_port *port, unsigned char ch) { while (mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_TX_FULL) cpu_relax(); @@ -484,7 +484,7 @@ static struct console mps2_uart_console = { #define MPS2_SERIAL_CONSOLE (&mps2_uart_console) -static void mps2_early_putchar(struct uart_port *port, int ch) +static void mps2_early_putchar(struct uart_port *port, unsigned char ch) { while (readb(port->membase + UARTn_STATE) & UARTn_STATE_TX_FULL) cpu_relax(); diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 45976e2140d4..0429c2a54290 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -676,7 +676,7 @@ static const struct uart_ops mvebu_uart_ops = { #ifdef CONFIG_SERIAL_MVEBU_CONSOLE /* Early Console */ -static void mvebu_uart_putc(struct uart_port *port, int c) +static void mvebu_uart_putc(struct uart_port *port, unsigned char c) { unsigned int st; @@ -737,7 +737,7 @@ static void wait_for_xmite(struct uart_port *port) (val & STAT_TX_EMP), 1, 10000); } -static void mvebu_uart_console_putchar(struct uart_port *port, int ch) +static void mvebu_uart_console_putchar(struct uart_port *port, unsigned char ch) { wait_for_xmitr(port); writel(ch, port->membase + UART_TSH(port)); diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index ac45f3386e97..1944daf8593a 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1305,7 +1305,7 @@ static const struct uart_ops mxs_auart_ops = { static struct mxs_auart_port *auart_port[MXS_AUART_PORTS]; #ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE -static void mxs_auart_console_putchar(struct uart_port *port, int ch) +static void mxs_auart_console_putchar(struct uart_port *port, unsigned char ch) { struct mxs_auart_port *s = to_auart_port(port); unsigned int to = 1000; diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index b4e060205e61..8d5ffa196097 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1180,7 +1180,7 @@ static void omap_serial_early_out(struct uart_port *port, int offset, writew(value, port->membase + offset); } -static void omap_serial_early_putc(struct uart_port *port, int c) +static void omap_serial_early_putc(struct uart_port *port, unsigned char c) { unsigned int status; @@ -1224,7 +1224,7 @@ static struct uart_omap_port *serial_omap_console_ports[OMAP_MAX_HSUART_PORTS]; static struct uart_driver serial_omap_reg; -static void serial_omap_console_putchar(struct uart_port *port, int ch) +static void serial_omap_console_putchar(struct uart_port *port, unsigned char ch) { struct uart_omap_port *up = to_uart_omap_port(port); diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index 91f1eb0058d7..5250bd7d390a 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -516,7 +516,7 @@ static const struct uart_ops owl_uart_ops = { #ifdef CONFIG_SERIAL_OWL_CONSOLE -static void owl_console_putchar(struct uart_port *port, int ch) +static void owl_console_putchar(struct uart_port *port, unsigned char ch) { if (!port->membase) return; diff --git a/drivers/tty/serial/pch_uart.c 
b/drivers/tty/serial/pch_uart.c index f0351e6f0ef6..affe71f8b50c 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -1600,7 +1600,7 @@ static const struct uart_ops pch_uart_ops = { #ifdef CONFIG_SERIAL_PCH_UART_CONSOLE -static void pch_console_putchar(struct uart_port *port, int ch) +static void pch_console_putchar(struct uart_port *port, unsigned char ch) { struct eg20t_port *priv = container_of(port, struct eg20t_port, port); diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c index 0a12fb11e698..b7a3a1b959b1 100644 --- a/drivers/tty/serial/pic32_uart.c +++ b/drivers/tty/serial/pic32_uart.c @@ -691,7 +691,7 @@ static const struct uart_ops pic32_uart_ops = { #ifdef CONFIG_SERIAL_PIC32_CONSOLE /* output given char */ -static void pic32_console_putchar(struct uart_port *port, int ch) +static void pic32_console_putchar(struct uart_port *port, unsigned char ch) { struct pic32_sport *sport = to_pic32_sport(port); diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index 5359236b32d6..5d97c201ad88 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -1944,7 +1944,7 @@ static void __exit exit_pmz(void) #ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE -static void pmz_console_putchar(struct uart_port *port, int ch) +static void pmz_console_putchar(struct uart_port *port, unsigned char ch) { struct uart_pmac_port *uap = container_of(port, struct uart_pmac_port, port); diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c index b4987b417f19..e80ba8e10407 100644 --- a/drivers/tty/serial/pxa.c +++ b/drivers/tty/serial/pxa.c @@ -605,7 +605,7 @@ static void wait_for_xmitr(struct uart_pxa_port *up) } } -static void serial_pxa_console_putchar(struct uart_port *port, int ch) +static void serial_pxa_console_putchar(struct uart_port *port, unsigned char ch) { struct uart_pxa_port *up = (struct uart_pxa_port *)port; diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index aedc38893e6c..1543a6028856 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -397,7 +397,7 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport, #endif #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE -static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch) +static void qcom_geni_serial_wr_char(struct uart_port *uport, unsigned char ch) { struct qcom_geni_private_data *private_data = uport->private_data; diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c index d550d8fa2fab..e5f1fded423a 100644 --- a/drivers/tty/serial/rda-uart.c +++ b/drivers/tty/serial/rda-uart.c @@ -573,7 +573,7 @@ static const struct uart_ops rda_uart_ops = { #ifdef CONFIG_SERIAL_RDA_CONSOLE -static void rda_console_putchar(struct uart_port *port, int ch) +static void rda_console_putchar(struct uart_port *port, unsigned char ch) { if (!port->membase) return; diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c index 697b6a002a16..5fe6cccfc1ae 100644 --- a/drivers/tty/serial/sa1100.c +++ b/drivers/tty/serial/sa1100.c @@ -695,7 +695,7 @@ void __init sa1100_register_uart(int idx, int port) #ifdef CONFIG_SERIAL_SA1100_CONSOLE -static void sa1100_console_putchar(struct uart_port *port, int ch) +static void sa1100_console_putchar(struct uart_port *port, unsigned char ch) { struct sa1100_port *sport = container_of(port, struct sa1100_port, port); diff --git a/drivers/tty/serial/samsung_tty.c 
b/drivers/tty/serial/samsung_tty.c index d002a4e48ed9..2bde7bde7116 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -2478,7 +2478,7 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port, #endif /* CONFIG_CONSOLE_POLL */ static void -s3c24xx_serial_console_putchar(struct uart_port *port, int ch) +s3c24xx_serial_console_putchar(struct uart_port *port, unsigned char ch) { unsigned int ufcon = rd_regl(port, S3C2410_UFCON); @@ -2965,7 +2965,7 @@ static void samsung_early_busyuart_fifo(struct uart_port *port) ; } -static void samsung_early_putc(struct uart_port *port, int c) +static void samsung_early_putc(struct uart_port *port, unsigned char c) { if (readl(port->membase + S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) samsung_early_busyuart_fifo(port); diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c index 738df6d9c0d9..2cf8533ef760 100644 --- a/drivers/tty/serial/sb1250-duart.c +++ b/drivers/tty/serial/sb1250-duart.c @@ -820,7 +820,7 @@ static void __init sbd_probe_duarts(void) * console output. The console_lock is held by the caller, so we * shouldn't be interrupted for more console activity. */ -static void sbd_console_putchar(struct uart_port *uport, int ch) +static void sbd_console_putchar(struct uart_port *uport, unsigned char ch) { struct sbd_port *sport = to_sport(uport); diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c index 10cc16a71f26..c56de2e104d4 100644 --- a/drivers/tty/serial/sccnxp.c +++ b/drivers/tty/serial/sccnxp.c @@ -828,7 +828,7 @@ static const struct uart_ops sccnxp_ops = { }; #ifdef CONFIG_SERIAL_SCCNXP_CONSOLE -static void sccnxp_console_putchar(struct uart_port *port, int c) +static void sccnxp_console_putchar(struct uart_port *port, unsigned char c) { int tryes = 100000; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 846192a7b4bf..a1688a341411 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1911,7 +1911,7 @@ static void uart_port_spin_lock_init(struct uart_port *port) */ void uart_console_write(struct uart_port *port, const char *s, unsigned int count, - void (*putchar)(struct uart_port *, int)) + void (*putchar)(struct uart_port *, unsigned char)) { unsigned int i; diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c index cd38cf1f03cb..a695e9c1a06a 100644 --- a/drivers/tty/serial/serial_txx9.c +++ b/drivers/tty/serial/serial_txx9.c @@ -876,7 +876,7 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv, #ifdef CONFIG_SERIAL_TXX9_CONSOLE -static void serial_txx9_console_putchar(struct uart_port *port, int ch) +static void serial_txx9_console_putchar(struct uart_port *port, unsigned char ch) { struct uart_txx9_port *up = to_uart_txx9_port(port); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 77d76973858f..0f9b8bd23500 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2960,7 +2960,7 @@ static void sci_cleanup_single(struct sci_port *port) #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \ defined(CONFIG_SERIAL_SH_SCI_EARLYCON) -static void serial_console_putchar(struct uart_port *port, int ch) +static void serial_console_putchar(struct uart_port *port, unsigned char ch) { sci_poll_put_char(port, ch); } diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c index b79900d0e91a..f5ac14c384c4 100644 --- a/drivers/tty/serial/sifive.c +++ b/drivers/tty/serial/sifive.c @@ -756,7 
+756,7 @@ static void sifive_serial_poll_put_char(struct uart_port *port, */ #ifdef CONFIG_SERIAL_EARLYCON -static void early_sifive_serial_putc(struct uart_port *port, int c) +static void early_sifive_serial_putc(struct uart_port *port, unsigned char c) { while (__ssp_early_readl(port, SIFIVE_SERIAL_TXDATA_OFFS) & SIFIVE_SERIAL_TXDATA_FULL_MASK) @@ -800,7 +800,7 @@ OF_EARLYCON_DECLARE(sifive, "sifive,fu540-c000-uart0", static struct sifive_serial_port *sifive_serial_console_ports[SIFIVE_SERIAL_MAX_PORTS]; -static void sifive_serial_console_putchar(struct uart_port *port, int ch) +static void sifive_serial_console_putchar(struct uart_port *port, unsigned char ch) { struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 9a7ae6384edf..4329b9c9cbf0 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -984,7 +984,7 @@ static void wait_for_xmitr(struct uart_port *port) } while (status & SPRD_TX_FIFO_CNT_MASK); } -static void sprd_console_putchar(struct uart_port *port, int ch) +static void sprd_console_putchar(struct uart_port *port, unsigned char ch) { wait_for_xmitr(port); serial_out(port, SPRD_TXD, ch); @@ -1058,7 +1058,7 @@ console_initcall(sprd_serial_console_init); #define SPRD_CONSOLE (&sprd_console) /* Support for earlycon */ -static void sprd_putc(struct uart_port *port, int c) +static void sprd_putc(struct uart_port *port, unsigned char c) { unsigned int timeout = SPRD_TIMEOUT; diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index 87e480cc8206..d7fd692286cf 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c @@ -854,7 +854,7 @@ static int asc_serial_resume(struct device *dev) /*----------------------------------------------------------------------*/ #ifdef CONFIG_SERIAL_ST_ASC_CONSOLE -static void asc_console_putchar(struct uart_port *port, int ch) +static void asc_console_putchar(struct uart_port *port, unsigned char ch) { unsigned int timeout = 1000000; diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 1b3a611ac39e..87b5cd4c9743 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -1641,7 +1641,7 @@ static int stm32_usart_serial_remove(struct platform_device *pdev) } #ifdef CONFIG_SERIAL_STM32_CONSOLE -static void stm32_usart_console_putchar(struct uart_port *port, int ch) +static void stm32_usart_console_putchar(struct uart_port *port, unsigned char ch) { struct stm32_port *stm32_port = to_stm32_port(port); const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c index 1c7a30bf5f04..9f15922e681b 100644 --- a/drivers/tty/serial/sunplus-uart.c +++ b/drivers/tty/serial/sunplus-uart.c @@ -500,7 +500,8 @@ static const struct uart_ops sunplus_uart_ops = { #ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR]; -static void sunplus_uart_console_putchar(struct uart_port *port, int ch) +static void sunplus_uart_console_putchar(struct uart_port *port, + unsigned char ch) { wait_for_xmitr(port); sp_uart_put_char(port, ch); @@ -736,7 +737,7 @@ static void __exit sunplus_uart_exit(void) module_exit(sunplus_uart_exit); #ifdef CONFIG_SERIAL_EARLYCON -static void sunplus_uart_putc(struct uart_port *port, int c) +static void sunplus_uart_putc(struct uart_port *port, unsigned char c) { unsigned int val; int ret; diff 
--git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index 92e572634009..6ea52293d9f3 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c @@ -846,7 +846,7 @@ static struct uart_sunsab_port *sunsab_ports; #ifdef CONFIG_SERIAL_SUNSAB_CONSOLE -static void sunsab_console_putchar(struct uart_port *port, int c) +static void sunsab_console_putchar(struct uart_port *port, unsigned char c) { struct uart_sunsab_port *up = container_of(port, struct uart_sunsab_port, port); diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 98b2f4fb9a99..c31389114b86 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -1281,7 +1281,7 @@ static void wait_for_xmitr(struct uart_sunsu_port *up) } } -static void sunsu_console_putchar(struct uart_port *port, int ch) +static void sunsu_console_putchar(struct uart_port *port, unsigned char ch) { struct uart_sunsu_port *up = container_of(port, struct uart_sunsu_port, port); diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index b714b00d2dad..c14275d83b0b 100644 --- a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c @@ -100,7 +100,7 @@ struct uart_sunzilog_port { #endif }; -static void sunzilog_putchar(struct uart_port *port, int ch); +static void sunzilog_putchar(struct uart_port *port, unsigned char ch); #define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel __iomem *)((PORT)->membase)) #define UART_ZILOG(PORT) ((struct uart_sunzilog_port *)(PORT)) @@ -1125,7 +1125,7 @@ static void sunzilog_free_tables(void) #define ZS_PUT_CHAR_MAX_DELAY 2000 /* 10 ms */ -static void __maybe_unused sunzilog_putchar(struct uart_port *port, int ch) +static void __maybe_unused sunzilog_putchar(struct uart_port *port, unsigned char ch) { struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port); int loops = ZS_PUT_CHAR_MAX_DELAY; diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index e1fa52d31474..007db67292a2 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -482,7 +482,7 @@ static void ulite_console_wait_tx(struct uart_port *port) "timeout waiting for TX buffer empty\n"); } -static void ulite_console_putchar(struct uart_port *port, int ch) +static void ulite_console_putchar(struct uart_port *port, unsigned char ch) { ulite_console_wait_tx(port); uart_out32(ch, ULITE_TX, port); @@ -558,7 +558,7 @@ static struct console ulite_console = { .data = &ulite_uart_driver, }; -static void early_uartlite_putc(struct uart_port *port, int c) +static void early_uartlite_putc(struct uart_port *port, unsigned char c) { /* * Limit how many times we'll spin waiting for TX FIFO status. 
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c index 8586a56d4deb..e0bf003ca3a1 100644 --- a/drivers/tty/serial/vr41xx_siu.c +++ b/drivers/tty/serial/vr41xx_siu.c @@ -730,7 +730,7 @@ static void wait_for_xmitr(struct uart_port *port) } } -static void siu_console_putchar(struct uart_port *port, int ch) +static void siu_console_putchar(struct uart_port *port, unsigned char ch) { wait_for_xmitr(port); siu_write(port, UART_TX, ch); diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c index 9adfe3dc970f..6f08136ce78a 100644 --- a/drivers/tty/serial/vt8500_serial.c +++ b/drivers/tty/serial/vt8500_serial.c @@ -484,7 +484,7 @@ static void wait_for_xmitr(struct uart_port *port) } while (status & 0x10); } -static void vt8500_console_putchar(struct uart_port *port, int c) +static void vt8500_console_putchar(struct uart_port *port, unsigned char c) { wait_for_xmitr(port); writeb(c, port->membase + VT8500_TXFIFO); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index d5e243908d9f..250a1d888eeb 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1142,7 +1142,7 @@ static struct uart_driver cdns_uart_uart_driver; * @port: Handle to the uart port structure * @ch: Character to be written */ -static void cdns_uart_console_putchar(struct uart_port *port, int ch) +static void cdns_uart_console_putchar(struct uart_port *port, unsigned char ch) { while (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL) cpu_relax(); diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c index 4b4f604646a7..70969bf9d82c 100644 --- a/drivers/tty/serial/zs.c +++ b/drivers/tty/serial/zs.c @@ -1124,7 +1124,7 @@ static int __init zs_probe_sccs(void) #ifdef CONFIG_SERIAL_ZS_CONSOLE -static void zs_console_putchar(struct uart_port *uport, int ch) +static void zs_console_putchar(struct uart_port *uport, unsigned char ch) { struct zs_port *zport = to_zport(uport); struct zs_scc *scc = zport->scc; diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 31f7fe527395..14ae35f68abb 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -399,7 +399,7 @@ int uart_set_options(struct uart_port *port, struct console *co, int baud, struct tty_driver *uart_console_device(struct console *co, int *index); void uart_console_write(struct uart_port *port, const char *s, unsigned int count, - void (*putchar)(struct uart_port *, int)); + void (*putchar)(struct uart_port *, unsigned char)); /* * Port/driver registration/removal -- cgit v1.2.3-71-gd317 From a1ac9c8acec1605c6b43af418f79facafdced680 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 2 Mar 2022 11:55:25 -0800 Subject: net: Add skb->mono_delivery_time to distinguish mono delivery_time from (rcv) timestamp skb->tstamp was first used as the (rcv) timestamp. The major usage is to report it to the user (e.g. SO_TIMESTAMP). Later, skb->tstamp is also set as the (future) delivery_time (e.g. EDT in TCP) during egress and used by the qdisc (e.g. sch_fq) to make decision on when the skb can be passed to the dev. Currently, there is no way to tell skb->tstamp having the (rcv) timestamp or the delivery_time, so it is always reset to 0 whenever forwarded between egress and ingress. 
While it makes sense to always clear the (rcv) timestamp in skb->tstamp to avoid confusing sch_fq that expects the delivery_time, it is a performance issue [0] to clear the delivery_time if the skb finally egresses to a fq@phy-dev. For example, when forwarding from egress to ingress and then finally back to egress:

            tcp-sender => veth@netns => veth@hostns => fq@eth0@hostns
                                     ^              ^
                                     reset          rest

This patch adds one bit skb->mono_delivery_time to flag that skb->tstamp is storing the mono delivery_time (EDT) instead of the (rcv) timestamp. The current use case is to keep the TCP mono delivery_time (EDT) and to be used with sch_fq. A later patch will also allow tc-bpf@ingress to read and change the mono delivery_time. In the future, another bit (e.g. skb->user_delivery_time) can be added for the SCM_TXTIME where the clock base is tracked by sk->sk_clockid. [ This patch is a prep work. The following patches will get the other parts of the stack ready first. Then another patch after that will finally set the skb->mono_delivery_time. ] A skb_set_delivery_time() function is added. It is used by tcp_output.c and during ip[6] fragmentation to assign the delivery_time to the skb->tstamp and also set the skb->mono_delivery_time. A note on the change in ip_send_unicast_reply() in ip_output.c: it is only used by TCP to send reset/ack out of a ctl_sk. Like the new skb_set_delivery_time(), this patch sets the skb->mono_delivery_time to 0 for now as a placeholder. It will be enabled in a later patch. A similar case in tcp_ipv6 can be done with skb_set_delivery_time() in tcp_v6_send_response(). [0] (slide 22): https://linuxplumbersconf.org/event/11/contributions/953/attachments/867/1658/LPC_2021_BPF_Datapath_Extensions.pdf Signed-off-by: Martin KaFai Lau Signed-off-by: David S. Miller --- include/linux/skbuff.h | 13 +++++++++++++ net/bridge/netfilter/nf_conntrack_bridge.c | 5 +++-- net/ipv4/ip_output.c | 7 +++++-- net/ipv4/tcp_output.c | 16 +++++++++------- net/ipv6/ip6_output.c | 5 +++-- net/ipv6/netfilter.c | 5 +++-- net/ipv6/tcp_ipv6.c | 2 +- 7 files changed, 37 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d67941f78b92..803ffa63dea6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -795,6 +795,10 @@ typedef unsigned char *sk_buff_data_t; * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB * @slow_gro: state present at GRO time, slower prepare step required + * @mono_delivery_time: When set, skb->tstamp has the + * delivery_time in mono clock base (i.e. EDT). Otherwise, the + * skb->tstamp has the (rcv) timestamp at ingress and + * delivery_time at egress.
* @napi_id: id of the NAPI struct this skb came from * @sender_cpu: (aka @napi_id) source CPU in XPS * @secmark: security marking @@ -965,6 +969,7 @@ struct sk_buff { __u8 decrypted:1; #endif __u8 slow_gro:1; + __u8 mono_delivery_time:1; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -3983,6 +3988,14 @@ static inline ktime_t net_timedelta(ktime_t t) return ktime_sub(ktime_get_real(), t); } +static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, + bool mono) +{ + skb->tstamp = kt; + /* Setting mono_delivery_time will be enabled later */ + skb->mono_delivery_time = 0; +} + static inline u8 skb_metadata_len(const struct sk_buff *skb) { return skb_shinfo(skb)->meta_len; diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index fdbed3158555..ebfb2a5c59e4 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -32,6 +32,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *)) { int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; + bool mono_delivery_time = skb->mono_delivery_time; unsigned int hlen, ll_rs, mtu; ktime_t tstamp = skb->tstamp; struct ip_frag_state state; @@ -81,7 +82,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, if (iter.frag) ip_fraglist_prepare(skb, &iter); - skb->tstamp = tstamp; + skb_set_delivery_time(skb, tstamp, mono_delivery_time); err = output(net, sk, data, skb); if (err || !iter.frag) break; @@ -112,7 +113,7 @@ slow_path: goto blackhole; } - skb2->tstamp = tstamp; + skb_set_delivery_time(skb2, tstamp, mono_delivery_time); err = output(net, sk, data, skb2); if (err) goto blackhole; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 6df3545c891d..a9588e0c82c5 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -761,6 +761,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, { struct iphdr *iph; struct sk_buff *skb2; + bool mono_delivery_time = skb->mono_delivery_time; struct rtable *rt = skb_rtable(skb); unsigned int mtu, hlen, ll_rs; struct ip_fraglist_iter iter; @@ -852,7 +853,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, } } - skb->tstamp = tstamp; + skb_set_delivery_time(skb, tstamp, mono_delivery_time); err = output(net, sk, skb); if (!err) @@ -908,7 +909,7 @@ slow_path: /* * Put this fragment into the sending queue. 
*/ - skb2->tstamp = tstamp; + skb_set_delivery_time(skb2, tstamp, mono_delivery_time); err = output(net, sk, skb2); if (err) goto fail; @@ -1727,6 +1728,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; + /* Setting mono_delivery_time will be enabled later */ + nskb->mono_delivery_time = 0; ip_push_pending_frames(sk, &fl4); } out: diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e76bf1e9251e..2319531267c6 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1253,7 +1253,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, tp = tcp_sk(sk); prior_wstamp = tp->tcp_wstamp_ns; tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); - skb->skb_mstamp_ns = tp->tcp_wstamp_ns; + skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); if (clone_it) { oskb = skb; @@ -1589,7 +1589,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, skb_split(skb, buff, len); - buff->tstamp = skb->tstamp; + skb_set_delivery_time(buff, skb->tstamp, true); tcp_fragment_tstamp(skb, buff); old_factor = tcp_skb_pcount(skb); @@ -2616,7 +2616,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ - skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; + tp->tcp_wstamp_ns = tp->tcp_clock_cache; + skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); tcp_init_tso_segs(skb, mss_now); goto repair; /* Skip network transmission */ @@ -3541,11 +3542,12 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, now = tcp_clock_ns(); #ifdef CONFIG_SYN_COOKIES if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) - skb->skb_mstamp_ns = cookie_init_timestamp(req, now); + skb_set_delivery_time(skb, cookie_init_timestamp(req, now), + true); else #endif { - skb->skb_mstamp_ns = now; + skb_set_delivery_time(skb, now, true); if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); } @@ -3594,7 +3596,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb, synack_type, &opts); - skb->skb_mstamp_ns = now; + skb_set_delivery_time(skb, now, true); tcp_add_tx_delay(skb, tp); return skb; @@ -3771,7 +3773,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); - syn->skb_mstamp_ns = syn_data->skb_mstamp_ns; + skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true); /* Now full SYN+DATA was cloned and sent (or not), * remove the SYN from the original skb (syn_data) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index c5edc86b18bd..dad4e3d0492e 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -813,6 +813,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? 
inet6_sk(skb->sk) : NULL; + bool mono_delivery_time = skb->mono_delivery_time; struct ip6_frag_state state; unsigned int mtu, hlen, nexthdr_offset; ktime_t tstamp = skb->tstamp; @@ -903,7 +904,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, if (iter.frag) ip6_fraglist_prepare(skb, &iter); - skb->tstamp = tstamp; + skb_set_delivery_time(skb, tstamp, mono_delivery_time); err = output(net, sk, skb); if (!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), @@ -962,7 +963,7 @@ slow_path: /* * Put this fragment into the sending queue. */ - frag->tstamp = tstamp; + skb_set_delivery_time(frag, tstamp, mono_delivery_time); err = output(net, sk, frag); if (err) goto fail; diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 6ab710b5a1a8..1da332450d98 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -121,6 +121,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct sk_buff *)) { int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; + bool mono_delivery_time = skb->mono_delivery_time; ktime_t tstamp = skb->tstamp; struct ip6_frag_state state; u8 *prevhdr, nexthdr = 0; @@ -186,7 +187,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, if (iter.frag) ip6_fraglist_prepare(skb, &iter); - skb->tstamp = tstamp; + skb_set_delivery_time(skb, tstamp, mono_delivery_time); err = output(net, sk, data, skb); if (err || !iter.frag) break; @@ -219,7 +220,7 @@ slow_path: goto blackhole; } - skb2->tstamp = tstamp; + skb_set_delivery_time(skb2, tstamp, mono_delivery_time); err = output(net, sk, data, skb2); if (err) goto blackhole; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index e98af869ff3a..cb2bb7d2e907 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -940,7 +940,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 } else { mark = sk->sk_mark; } - buff->tstamp = tcp_transmit_time(sk); + skb_set_delivery_time(buff, tcp_transmit_time(sk), true); } fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark; fl6.fl6_dport = t1->dest; -- cgit v1.2.3-71-gd317 From de799101519aad23c6096041ba2744d7b5517e6a Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 2 Mar 2022 11:55:31 -0800 Subject: net: Add skb_clear_tstamp() to keep the mono delivery_time Right now, skb->tstamp is reset to 0 whenever the skb is forwarded. If skb->tstamp has the mono delivery_time, clearing it can hurt the performance when it finally transmits out to fq@phy-dev. The earlier patch added a skb->mono_delivery_time bit to flag that skb->tstamp carries the mono delivery_time. This patch adds a skb_clear_tstamp() helper which keeps the mono delivery_time and clears everything else. The delivery_time clearing will be postponed until the stack knows the skb will be delivered locally. It will be done in a later patch. Signed-off-by: Martin KaFai Lau Signed-off-by: David S.
Miller --- drivers/net/loopback.c | 2 +- include/linux/skbuff.h | 10 +++++++++- net/bridge/br_forward.c | 2 +- net/core/filter.c | 6 +++--- net/core/skbuff.c | 2 +- net/ipv4/ip_forward.c | 2 +- net/ipv6/ip6_output.c | 2 +- net/netfilter/ipvs/ip_vs_xmit.c | 6 +++--- net/netfilter/nf_dup_netdev.c | 2 +- net/netfilter/nf_flow_table_ip.c | 4 ++-- net/netfilter/nft_fwd_netdev.c | 2 +- net/openvswitch/vport.c | 2 +- net/xfrm/xfrm_interface.c | 2 +- 13 files changed, 26 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index d05f86fe78c9..720394c0639b 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -74,7 +74,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, skb_tx_timestamp(skb); /* do not fool net_timestamp_check() with various clock bases */ - skb->tstamp = 0; + skb_clear_tstamp(skb); skb_orphan(skb); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 803ffa63dea6..27a28920e7b3 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3996,6 +3996,14 @@ static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, skb->mono_delivery_time = 0; } +static inline void skb_clear_tstamp(struct sk_buff *skb) +{ + if (skb->mono_delivery_time) + return; + + skb->tstamp = 0; +} + static inline u8 skb_metadata_len(const struct sk_buff *skb) { return skb_shinfo(skb)->meta_len; @@ -4852,7 +4860,7 @@ static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) #ifdef CONFIG_NET_REDIRECT skb->from_ingress = from_ingress; if (skb->from_ingress) - skb->tstamp = 0; + skb_clear_tstamp(skb); #endif } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index ec646656dbf1..02bb620d3b8d 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -62,7 +62,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { - skb->tstamp = 0; + skb_clear_tstamp(skb); return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, net, sk, skb, NULL, skb->dev, br_dev_queue_push_xmit); diff --git a/net/core/filter.c b/net/core/filter.c index 65869fd510e8..cfcf9b4d1ec2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2107,7 +2107,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) } skb->dev = dev; - skb->tstamp = 0; + skb_clear_tstamp(skb); dev_xmit_recursion_inc(); ret = dev_queue_xmit(skb); @@ -2176,7 +2176,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, } skb->dev = dev; - skb->tstamp = 0; + skb_clear_tstamp(skb); if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { skb = skb_expand_head(skb, hh_len); @@ -2274,7 +2274,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, } skb->dev = dev; - skb->tstamp = 0; + skb_clear_tstamp(skb); if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { skb = skb_expand_head(skb, hh_len); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b32c5d782fe1..9abb0028309f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5381,7 +5381,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) ipvs_reset(skb); skb->mark = 0; - skb->tstamp = 0; + skb_clear_tstamp(skb); } EXPORT_SYMBOL_GPL(skb_scrub_packet); diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 00ec819f949b..92ba3350274b 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -79,7 +79,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s if 
(unlikely(opt->optlen)) ip_forward_options(skb); - skb->tstamp = 0; + skb_clear_tstamp(skb); return dst_output(net, sk, skb); } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index dad4e3d0492e..50db9b20d746 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -440,7 +440,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk, } #endif - skb->tstamp = 0; + skb_clear_tstamp(skb); return dst_output(net, sk, skb); } diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index d2e5a8f644b8..029171379884 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -610,7 +610,7 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb, nf_reset_ct(skb); skb_forward_csum(skb); if (skb->dev) - skb->tstamp = 0; + skb_clear_tstamp(skb); } return ret; } @@ -652,7 +652,7 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, if (!local) { skb_forward_csum(skb); if (skb->dev) - skb->tstamp = 0; + skb_clear_tstamp(skb); NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, NULL, skb_dst(skb)->dev, dst_output); } else @@ -674,7 +674,7 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, ip_vs_drop_early_demux_sk(skb); skb_forward_csum(skb); if (skb->dev) - skb->tstamp = 0; + skb_clear_tstamp(skb); NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb, NULL, skb_dst(skb)->dev, dst_output); } else diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c index a579e59ee5c5..7873bd1389c3 100644 --- a/net/netfilter/nf_dup_netdev.c +++ b/net/netfilter/nf_dup_netdev.c @@ -19,7 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev) skb_push(skb, skb->mac_len); skb->dev = dev; - skb->tstamp = 0; + skb_clear_tstamp(skb); dev_queue_xmit(skb); } diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 889cf88d3dba..f1d387129f02 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -376,7 +376,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, nf_flow_nat_ip(flow, skb, thoff, dir, iph); ip_decrease_ttl(iph); - skb->tstamp = 0; + skb_clear_tstamp(skb); if (flow_table->flags & NF_FLOWTABLE_COUNTER) nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); @@ -611,7 +611,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, nf_flow_nat_ipv6(flow, skb, dir, ip6h); ip6h->hop_limit--; - skb->tstamp = 0; + skb_clear_tstamp(skb); if (flow_table->flags & NF_FLOWTABLE_COUNTER) nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index 619e394a91de..08e7a289738e 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -145,7 +145,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr, return; skb->dev = dev; - skb->tstamp = 0; + skb_clear_tstamp(skb); neigh_xmit(neigh_table, dev, addr, skb); out: regs->verdict.code = verdict; diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index cf2ce5812489..82a74f998966 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -507,7 +507,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto) } skb->dev = vport->dev; - skb->tstamp = 0; + skb_clear_tstamp(skb); vport->ops->send(skb); return; diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 57448fc519fc..4991e99ced9a 100644 --- a/net/xfrm/xfrm_interface.c +++ 
b/net/xfrm/xfrm_interface.c @@ -190,7 +190,7 @@ static void xfrmi_dev_uninit(struct net_device *dev) static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet) { - skb->tstamp = 0; + skb_clear_tstamp(skb); skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; skb->ignore_df = 0; -- cgit v1.2.3-71-gd317 From 27942a15209f564ed8ee2a9e126cb7b105181355 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 2 Mar 2022 11:55:38 -0800 Subject: net: Handle delivery_time in skb->tstamp during network tapping with af_packet A later patch will set the skb->mono_delivery_time to flag that skb->tstamp is used as the mono delivery_time (EDT) instead of the (rcv) timestamp. skb_clear_tstamp() will then keep this delivery_time during forwarding. This patch makes the network tapping (with af_packet) handle the delivery_time stored in skb->tstamp. Regardless of tapping at the ingress or egress, the tapped skb is received by the af_packet socket, so it is ingress to the af_packet socket and it expects the (rcv) timestamp. When tapping at egress, dev_queue_xmit_nit() is used. It already expects that skb->tstamp may have the delivery_time, so it does skb_clone()+net_timestamp_set() to ensure the cloned skb has the (rcv) timestamp before passing to the af_packet sk. This patch only adds clearing of the skb->mono_delivery_time bit in net_timestamp_set(). When tapping at ingress, it currently expects that skb->tstamp is either 0 or the (rcv) timestamp. Meaning, the tapping-at-ingress path already expects that skb->tstamp could be 0 and it will get the (rcv) timestamp by ktime_get_real() when needed. There are two cases for tapping at ingress. One case is that af_packet queues the skb to its sk_receive_queue. The skb is either not shared or a new clone is created. The newly added skb_clear_delivery_time() is called to clear the delivery_time (if any) and set the (rcv) timestamp if needed before the skb is queued to the sk_receive_queue. In the other case, the ingress skb is directly copied to the rx_ring and tpacket_get_timestamp() is used to get the (rcv) timestamp. The newly added skb_tstamp() is used in tpacket_get_timestamp() to check the skb->mono_delivery_time bit before returning skb->tstamp. As mentioned earlier, tapping@ingress already expects that the skb may not have the (rcv) timestamp (because no sk has asked for it) and handles this case by directly calling ktime_get_real(). Signed-off-by: Martin KaFai Lau Signed-off-by: David S. Miller --- include/linux/skbuff.h | 24 ++++++++++++++++++++++++ net/core/dev.c | 4 +++- net/packet/af_packet.c | 4 +++- 3 files changed, 30 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 27a28920e7b3..7e2d796ece80 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3996,6 +3996,22 @@ static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, skb->mono_delivery_time = 0; } +DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); + +/* It is used in the ingress path to clear the delivery_time. + * If needed, set the skb->tstamp to the (rcv) timestamp.
+ */ +static inline void skb_clear_delivery_time(struct sk_buff *skb) +{ + if (skb->mono_delivery_time) { + skb->mono_delivery_time = 0; + if (static_branch_unlikely(&netstamp_needed_key)) + skb->tstamp = ktime_get_real(); + else + skb->tstamp = 0; + } +} + static inline void skb_clear_tstamp(struct sk_buff *skb) { if (skb->mono_delivery_time) @@ -4004,6 +4020,14 @@ static inline void skb_clear_tstamp(struct sk_buff *skb) skb->tstamp = 0; } +static inline ktime_t skb_tstamp(const struct sk_buff *skb) +{ + if (skb->mono_delivery_time) + return 0; + + return skb->tstamp; +} + static inline u8 skb_metadata_len(const struct sk_buff *skb) { return skb_shinfo(skb)->meta_len; diff --git a/net/core/dev.c b/net/core/dev.c index c9e54e5ad48d..e128f26711eb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2047,7 +2047,8 @@ void net_dec_egress_queue(void) EXPORT_SYMBOL_GPL(net_dec_egress_queue); #endif -static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); +DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); +EXPORT_SYMBOL(netstamp_needed_key); #ifdef CONFIG_JUMP_LABEL static atomic_t netstamp_needed_deferred; static atomic_t netstamp_wanted; @@ -2108,6 +2109,7 @@ EXPORT_SYMBOL(net_disable_timestamp); static inline void net_timestamp_set(struct sk_buff *skb) { skb->tstamp = 0; + skb->mono_delivery_time = 0; if (static_branch_unlikely(&netstamp_needed_key)) __net_timestamp(skb); } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ab87f22cc7ec..1b93ce1a5600 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -460,7 +460,7 @@ static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts, return TP_STATUS_TS_RAW_HARDWARE; if ((flags & SOF_TIMESTAMPING_SOFTWARE) && - ktime_to_timespec64_cond(skb->tstamp, ts)) + ktime_to_timespec64_cond(skb_tstamp(skb), ts)) return TP_STATUS_TS_SOFTWARE; return 0; @@ -2199,6 +2199,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_packets++; sock_skb_set_dropcount(sk, skb); + skb_clear_delivery_time(skb); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); @@ -2377,6 +2378,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, po->stats.stats1.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; + skb_clear_delivery_time(copy_skb); __skb_queue_tail(&sk->sk_receive_queue, copy_skb); } spin_unlock(&sk->sk_receive_queue.lock); -- cgit v1.2.3-71-gd317 From d93376f503c7a586707925957592c0f16f4db0b1 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 2 Mar 2022 11:55:44 -0800 Subject: net: Clear mono_delivery_time bit in __skb_tstamp_tx() __skb_tstamp_tx() may clone the egress skb and queue the clone to the sk_error_queue. The outgoing skb may have the mono delivery_time while the (rcv) timestamp is expected for the clone, so the skb->mono_delivery_time bit needs to be cleared from the clone. This patch adds the skb->mono_delivery_time clearing to the existing __net_timestamp() and uses it in __skb_tstamp_tx(). The __net_timestamp() fast path usage in dev.c is changed to directly call ktime_get_real() since the mono_delivery_time bit is not set at that point. Signed-off-by: Martin KaFai Lau Signed-off-by: David S.
Miller --- include/linux/skbuff.h | 1 + net/core/dev.c | 4 ++-- net/core/skbuff.c | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7e2d796ece80..8e8a4af4f9e2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3981,6 +3981,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb, static inline void __net_timestamp(struct sk_buff *skb) { skb->tstamp = ktime_get_real(); + skb->mono_delivery_time = 0; } static inline ktime_t net_timedelta(ktime_t t) diff --git a/net/core/dev.c b/net/core/dev.c index e128f26711eb..5db2443c2371 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2111,13 +2111,13 @@ static inline void net_timestamp_set(struct sk_buff *skb) skb->tstamp = 0; skb->mono_delivery_time = 0; if (static_branch_unlikely(&netstamp_needed_key)) - __net_timestamp(skb); + skb->tstamp = ktime_get_real(); } #define net_timestamp_check(COND, SKB) \ if (static_branch_unlikely(&netstamp_needed_key)) { \ if ((COND) && !(SKB)->tstamp) \ - __net_timestamp(SKB); \ + (SKB)->tstamp = ktime_get_real(); \ } \ bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9abb0028309f..e5082836295b 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4851,7 +4851,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, if (hwtstamps) *skb_hwtstamps(skb) = *hwtstamps; else - skb->tstamp = ktime_get_real(); + __net_timestamp(skb); __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); } -- cgit v1.2.3-71-gd317 From d98d58a002619b5c165f1eedcd731e2fe2c19088 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 2 Mar 2022 11:55:50 -0800 Subject: net: Set skb->mono_delivery_time and clear it after sch_handle_ingress() The previous patches handled the delivery_time before sch_handle_ingress(). This patch can now set the skb->mono_delivery_time to flag that skb->tstamp is used as the mono delivery_time (EDT) instead of the (rcv) timestamp and also clear it with skb_clear_delivery_time() after sch_handle_ingress(). This will make bpf_redirect_*() keep the mono delivery_time so it can be used by a qdisc (fq) of the egressing interface. A later patch will postpone the skb_clear_delivery_time() until the stack learns that the skb is being delivered locally and that will make other kernel forwarding paths (ip[6]_forward) able to keep the delivery_time also. Thus, like the previous patches on using the skb->mono_delivery_time bit, calling skb_clear_delivery_time() is not limited to CONFIG_NET_INGRESS, to avoid too much code churn within this set. Signed-off-by: Martin KaFai Lau Signed-off-by: David S.
Miller --- include/linux/skbuff.h | 3 +-- net/core/dev.c | 8 ++++++-- net/ipv4/ip_output.c | 3 +-- 3 files changed, 8 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8e8a4af4f9e2..0f5fd53059cd 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3993,8 +3993,7 @@ static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, bool mono) { skb->tstamp = kt; - /* Setting mono_delivery_time will be enabled later */ - skb->mono_delivery_time = 0; + skb->mono_delivery_time = kt && mono; } DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); diff --git a/net/core/dev.c b/net/core/dev.c index 5db2443c2371..4b572bbbd07e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5220,8 +5220,10 @@ another_round: goto out; } - if (skb_skip_tc_classify(skb)) + if (skb_skip_tc_classify(skb)) { + skb_clear_delivery_time(skb); goto skip_classify; + } if (pfmemalloc) goto skip_taps; @@ -5250,12 +5252,14 @@ skip_taps: goto another_round; if (!skb) goto out; + skb_clear_delivery_time(skb); nf_skip_egress(skb, false); if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) goto out; - } + } else #endif + skb_clear_delivery_time(skb); skb_reset_redirect(skb); skip_classify: if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index a9588e0c82c5..00b4bf26fd93 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1728,8 +1728,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; - /* Setting mono_delivery_time will be enabled later */ - nskb->mono_delivery_time = 0; + nskb->mono_delivery_time = !!transmit_time; ip_push_pending_frames(sk, &fl4); } out: -- cgit v1.2.3-71-gd317 From b6561f8491ca899e5a08311796085c9738d631ae Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 2 Mar 2022 11:56:09 -0800 Subject: net: ipv6: Get rcv timestamp if needed when handling hop-by-hop IOAM option IOAM is a hop-by-hop option with a temporary IANA allocation (49). Since it is hop-by-hop, it is done before the input routing decision. One of the traced data fields is the (rcv) timestamp. When the locally generated skb is looping from egress to ingress over a virtual interface (e.g. veth, loopback...), skb->tstamp may have the delivery time before it is known that it will be delivered locally and received by another sk. Like handling the network tapping (tcpdump) in the earlier patch, this patch gets the timestamp if needed without overwriting the delivery_time in the skb->tstamp. skb_tstamp_cond() is added to do the ktime_get_real() with an extra cond arg to check on top of the netstamp_needed_key static key. skb_tstamp_cond() will also be used in a later patch and it needs the netstamp_needed_key check. Signed-off-by: Martin KaFai Lau Signed-off-by: David S.
Miller --- include/linux/skbuff.h | 11 +++++++++++ net/ipv6/ioam6.c | 19 +++++++++---------- 2 files changed, 20 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0f5fd53059cd..4b5b926a81f2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -4028,6 +4028,17 @@ static inline ktime_t skb_tstamp(const struct sk_buff *skb) return skb->tstamp; } +static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond) +{ + if (!skb->mono_delivery_time && skb->tstamp) + return skb->tstamp; + + if (static_branch_unlikely(&netstamp_needed_key) || cond) + return ktime_get_real(); + + return 0; +} + static inline u8 skb_metadata_len(const struct sk_buff *skb) { return skb_shinfo(skb)->meta_len; } diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c index e159eb4328a8..1098131ed90c 100644 --- a/net/ipv6/ioam6.c +++ b/net/ipv6/ioam6.c @@ -635,7 +635,8 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb, struct ioam6_schema *sc, u8 sclen, bool is_input) { - struct __kernel_sock_timeval ts; + struct timespec64 ts; + ktime_t tstamp; u64 raw64; u32 raw32; u16 raw16; @@ -680,10 +681,9 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb, if (!skb->dev) { *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); } else { - if (!skb->tstamp) - __net_timestamp(skb); + tstamp = skb_tstamp_cond(skb, true); + ts = ktime_to_timespec64(tstamp); - skb_get_new_timestamp(skb, &ts); *(__be32 *)data = cpu_to_be32((u32)ts.tv_sec); } data += sizeof(__be32); @@ -694,13 +694,12 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb, if (!skb->dev) { *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); } else { - if (!skb->tstamp) - __net_timestamp(skb); + if (!trace->type.bit2) { + tstamp = skb_tstamp_cond(skb, true); + ts = ktime_to_timespec64(tstamp); + } - if (!trace->type.bit2) - skb_get_new_timestamp(skb, &ts); - - *(__be32 *)data = cpu_to_be32((u32)ts.tv_usec); + *(__be32 *)data = cpu_to_be32((u32)(ts.tv_nsec / NSEC_PER_USEC)); } data += sizeof(__be32); } -- cgit v1.2.3-71-gd317 From 7449197d600d30d038b1ce6285ab4747096eac7f Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 2 Mar 2022 11:56:28 -0800 Subject: bpf: Keep the (rcv) timestamp behavior for the existing tc-bpf@ingress The current tc-bpf@ingress reads and writes the __sk_buff->tstamp as a (rcv) timestamp, which currently could either be 0 (not available) or ktime_get_real(). This patch keeps backward compatibility with the (rcv) timestamp expectation at ingress. If the skb->tstamp has the delivery_time, the bpf insn rewrite will read 0 for tc-bpf running at ingress as it is not available. When writing at ingress, it will also clear the skb->mono_delivery_time bit. /* BPF_READ: a = __sk_buff->tstamp */ if (!skb->tc_at_ingress || !skb->mono_delivery_time) a = skb->tstamp; else a = 0 /* BPF_WRITE: __sk_buff->tstamp = a */ if (skb->tc_at_ingress) skb->mono_delivery_time = 0; skb->tstamp = a; [ A note on the BPF_CGROUP_INET_INGRESS which can also access skb->tstamp. At that point, the skb is delivered locally and skb_clear_delivery_time() has already been done, so the skb->tstamp will only have the (rcv) timestamp. ] If the tc-bpf@egress writes 0 to skb->tstamp, the skb->mono_delivery_time has to be cleared also. It could be done together during convert_ctx_access(). However, a later patch will also expose the skb->mono_delivery_time bit as __sk_buff->delivery_time_type. Changing the delivery_time_type in the background may surprise the user, e.g.
the 2nd read on __sk_buff->delivery_time_type may need a READ_ONCE() to avoid compiler optimization. Thus, in expecting the needs in the latter patch, this patch does a check on !skb->tstamp after running the tc-bpf and clears the skb->mono_delivery_time bit if needed. The earlier discussion on v4 [0]. The bpf insn rewrite requires the skb's mono_delivery_time bit and tc_at_ingress bit. They are moved up in sk_buff so that bpf rewrite can be done at a fixed offset. tc_skip_classify is moved together with tc_at_ingress. To get one bit for mono_delivery_time, csum_not_inet is moved down and this bit is currently used by sctp. [0]: https://lore.kernel.org/bpf/20220217015043.khqwqklx45c4m4se@kafai-mbp.dhcp.thefacebook.com/ Signed-off-by: Martin KaFai Lau Signed-off-by: David S. Miller --- include/linux/skbuff.h | 18 ++++++++----- net/core/filter.c | 71 +++++++++++++++++++++++++++++++++++++++++++------- net/sched/act_bpf.c | 2 ++ net/sched/cls_bpf.c | 2 ++ 4 files changed, 77 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4b5b926a81f2..5445860e1ba6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -941,8 +941,12 @@ struct sk_buff { __u8 vlan_present:1; /* See PKT_VLAN_PRESENT_BIT */ __u8 csum_complete_sw:1; __u8 csum_level:2; - __u8 csum_not_inet:1; __u8 dst_pending_confirm:1; + __u8 mono_delivery_time:1; +#ifdef CONFIG_NET_CLS_ACT + __u8 tc_skip_classify:1; + __u8 tc_at_ingress:1; +#endif #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif @@ -953,10 +957,6 @@ struct sk_buff { #ifdef CONFIG_NET_SWITCHDEV __u8 offload_fwd_mark:1; __u8 offload_l3_fwd_mark:1; -#endif -#ifdef CONFIG_NET_CLS_ACT - __u8 tc_skip_classify:1; - __u8 tc_at_ingress:1; #endif __u8 redirected:1; #ifdef CONFIG_NET_REDIRECT @@ -969,7 +969,7 @@ struct sk_buff { __u8 decrypted:1; #endif __u8 slow_gro:1; - __u8 mono_delivery_time:1; + __u8 csum_not_inet:1; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -1047,10 +1047,16 @@ struct sk_buff { /* if you move pkt_vlan_present around you also must adapt these constants */ #ifdef __BIG_ENDIAN_BITFIELD #define PKT_VLAN_PRESENT_BIT 7 +#define TC_AT_INGRESS_MASK (1 << 0) +#define SKB_MONO_DELIVERY_TIME_MASK (1 << 2) #else #define PKT_VLAN_PRESENT_BIT 0 +#define TC_AT_INGRESS_MASK (1 << 7) +#define SKB_MONO_DELIVERY_TIME_MASK (1 << 5) #endif #define PKT_VLAN_PRESENT_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) +#define TC_AT_INGRESS_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) +#define SKB_MONO_DELIVERY_TIME_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) #ifdef __KERNEL__ /* diff --git a/net/core/filter.c b/net/core/filter.c index cfcf9b4d1ec2..5072733743e9 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8859,6 +8859,65 @@ static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si, return insn; } +static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_insn *si, + struct bpf_insn *insn) +{ + __u8 value_reg = si->dst_reg; + __u8 skb_reg = si->src_reg; + +#ifdef CONFIG_NET_CLS_ACT + __u8 tmp_reg = BPF_REG_AX; + + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 5); + /* @ingress, read __sk_buff->tstamp as the (rcv) timestamp, + * so check the skb->mono_delivery_time. 
+ */ + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, + SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); + /* skb->mono_delivery_time is set, read 0 as the (rcv) timestamp. */ + *insn++ = BPF_MOV64_IMM(value_reg, 0); + *insn++ = BPF_JMP_A(1); +#endif + + *insn++ = BPF_LDX_MEM(BPF_DW, value_reg, skb_reg, + offsetof(struct sk_buff, tstamp)); + return insn; +} + +static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_insn *si, + struct bpf_insn *insn) +{ + __u8 value_reg = si->src_reg; + __u8 skb_reg = si->dst_reg; + +#ifdef CONFIG_NET_CLS_ACT + __u8 tmp_reg = BPF_REG_AX; + + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 3); + /* Writing __sk_buff->tstamp at ingress as the (rcv) timestamp. + * Clear the skb->mono_delivery_time. + */ + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, + ~SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); +#endif + + /* skb->tstamp = tstamp */ + *insn++ = BPF_STX_MEM(BPF_DW, skb_reg, value_reg, + offsetof(struct sk_buff, tstamp)); + return insn; +} + static u32 bpf_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, @@ -9167,17 +9226,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_DW, - si->dst_reg, si->src_reg, - bpf_target_off(struct sk_buff, - tstamp, 8, - target_size)); + insn = bpf_convert_tstamp_write(si, insn); else - *insn++ = BPF_LDX_MEM(BPF_DW, - si->dst_reg, si->src_reg, - bpf_target_off(struct sk_buff, - tstamp, 8, - target_size)); + insn = bpf_convert_tstamp_read(si, insn); break; case offsetof(struct __sk_buff, gso_segs): diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index a77d8908e737..fea2d78b9ddc 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -53,6 +53,8 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act, bpf_compute_data_pointers(skb); filter_res = bpf_prog_run(filter, skb); } + if (unlikely(!skb->tstamp && skb->mono_delivery_time)) + skb->mono_delivery_time = 0; if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK) skb_orphan(skb); diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index df19a847829e..c85b85a192bf 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -102,6 +102,8 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, bpf_compute_data_pointers(skb); filter_res = bpf_prog_run(prog->filter, skb); } + if (unlikely(!skb->tstamp && skb->mono_delivery_time)) + skb->mono_delivery_time = 0; if (prog->exts_integrated) { res->class = 0; -- cgit v1.2.3-71-gd317 From 8d21ec0e46ed6e39994accff8eb4f2be3d2e76b5 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 2 Mar 2022 11:56:34 -0800 Subject: bpf: Add __sk_buff->delivery_time_type and bpf_skb_set_skb_delivery_time() * __sk_buff->delivery_time_type: This patch adds __sk_buff->delivery_time_type. It tells if the delivery_time is stored in __sk_buff->tstamp or not. It will be most useful for ingress to tell if the __sk_buff->tstamp has the (rcv) timestamp or delivery_time. 
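[ Editor's aside: a minimal tc-bpf sketch of the intended ingress usage, elaborated just below. This is a hypothetical program written against the API added by this set; the SEC() macro, helper prototypes, and TARGET_IFINDEX are assumed to come from libbpf/vmlinux.h and the user's own setup, and the program itself is not code from these patches:

SEC("tc")
int promote_rcv_tstamp(struct __sk_buff *skb)
{
	/* At ingress, BPF_SKB_DELIVERY_TIME_NONE means __sk_buff->tstamp
	 * only carries the (rcv) timestamp, so replace it with a mono
	 * delivery_time (EDT) before redirecting, e.g. for sch_fq on the
	 * egress device.
	 */
	if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_NONE &&
	    bpf_skb_set_delivery_time(skb, bpf_ktime_get_ns(),
				      BPF_SKB_DELIVERY_TIME_MONO))
		return TC_ACT_SHOT;

	return bpf_redirect(TARGET_IFINDEX /* hypothetical */, 0);
}
]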
If delivery_time_type is 0 (BPF_SKB_DELIVERY_TIME_NONE), it has the (rcv) timestamp. Two non-zero types are defined for the delivery_time_type, BPF_SKB_DELIVERY_TIME_MONO and BPF_SKB_DELIVERY_TIME_UNSPEC. For UNSPEC, it can only happen in egress because only mono delivery_time can be forwarded to ingress now. The clock of UNSPEC delivery_time can be deduced from the skb->sk->sk_clockid which is how the sch_etf doing it also. * Provide forwarded delivery_time to tc-bpf@ingress: With the help of the new delivery_time_type, the tc-bpf has a way to tell if the __sk_buff->tstamp has the (rcv) timestamp or the delivery_time. During bpf load time, the verifier will learn if the bpf prog has accessed the new __sk_buff->delivery_time_type. If it does, it means the tc-bpf@ingress is expecting the skb->tstamp could have the delivery_time. The kernel will then read the skb->tstamp as-is during bpf insn rewrite without checking the skb->mono_delivery_time. This is done by adding a new prog->delivery_time_access bit. The same goes for writing skb->tstamp. * bpf_skb_set_delivery_time(): The bpf_skb_set_delivery_time() helper is added to allow setting both delivery_time and the delivery_time_type at the same time. If the tc-bpf does not need to change the delivery_time_type, it can directly write to the __sk_buff->tstamp as the existing tc-bpf has already been doing. It will be most useful at ingress to change the __sk_buff->tstamp from the (rcv) timestamp to a mono delivery_time and then bpf_redirect_*(). bpf only has mono clock helper (bpf_ktime_get_ns), and the current known use case is the mono EDT for fq, and only mono delivery time can be kept during forward now, so bpf_skb_set_delivery_time() only supports setting BPF_SKB_DELIVERY_TIME_MONO. It can be extended later when use cases come up and the forwarding path also supports other clock bases. Signed-off-by: Martin KaFai Lau Signed-off-by: David S. Miller --- include/linux/filter.h | 3 +- include/uapi/linux/bpf.h | 41 +++++++++- net/core/filter.c | 169 ++++++++++++++++++++++++++++++++--------- tools/include/uapi/linux/bpf.h | 41 +++++++++- 4 files changed, 216 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 1cb1af917617..9bf26307247f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -572,7 +572,8 @@ struct bpf_prog { has_callchain_buf:1, /* callchain buffer allocated? */ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ - call_get_func_ip:1; /* Do we call get_func_ip() */ + call_get_func_ip:1, /* Do we call get_func_ip() */ + delivery_time_access:1; /* Accessed __sk_buff->delivery_time_type */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index afe3d0d7f5f2..4eebea830613 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5086,6 +5086,37 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. On error * *dst* buffer is zeroed out. + * + * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type) + * Description + * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also + * change the __sk_buff->delivery_time_type to *dtime_type*. 
+ * + * When setting a delivery time (non zero *dtime*) to + * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* + * is supported. It is the only delivery_time_type that will be + * kept after bpf_redirect_*(). + * + * If there is no need to change the __sk_buff->delivery_time_type, + * the delivery time can be directly written to __sk_buff->tstamp + * instead. + * + * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE + * can be used to clear any delivery time stored in + * __sk_buff->tstamp. + * + * Only IPv4 and IPv6 skb->protocol are supported. + * + * This function is most useful when it needs to set a + * mono delivery time to __sk_buff->tstamp and then + * bpf_redirect_*() to the egress of an iface. For example, + * changing the (rcv) timestamp in __sk_buff->tstamp at + * ingress to a mono delivery time and then bpf_redirect_*() + * to sch_fq@phy-dev. + * Return + * 0 on success. + * **-EINVAL** for invalid input + * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5280,6 +5311,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ + FN(skb_set_delivery_time), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5469,6 +5501,12 @@ union { \ __u64 :64; \ } __attribute__((aligned(8))) +enum { + BPF_SKB_DELIVERY_TIME_NONE, + BPF_SKB_DELIVERY_TIME_UNSPEC, + BPF_SKB_DELIVERY_TIME_MONO, +}; + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ @@ -5509,7 +5547,8 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u32 :32; /* Padding, future use. */ + __u8 delivery_time_type; + __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; diff --git a/net/core/filter.c b/net/core/filter.c index 5072733743e9..88767f7da150 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7388,6 +7388,43 @@ static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = { .arg3_type = ARG_ANYTHING, }; +BPF_CALL_3(bpf_skb_set_delivery_time, struct sk_buff *, skb, + u64, dtime, u32, dtime_type) +{ + /* skb_clear_delivery_time() is done for inet protocol */ + if (skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_IPV6)) + return -EOPNOTSUPP; + + switch (dtime_type) { + case BPF_SKB_DELIVERY_TIME_MONO: + if (!dtime) + return -EINVAL; + skb->tstamp = dtime; + skb->mono_delivery_time = 1; + break; + case BPF_SKB_DELIVERY_TIME_NONE: + if (dtime) + return -EINVAL; + skb->tstamp = 0; + skb->mono_delivery_time = 0; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct bpf_func_proto bpf_skb_set_delivery_time_proto = { + .func = bpf_skb_set_delivery_time, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + #endif /* CONFIG_INET */ bool bpf_helper_changes_pkt_data(void *func) @@ -7749,6 +7786,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_tcp_gen_syncookie_proto; case BPF_FUNC_sk_assign: return &bpf_sk_assign_proto; + case BPF_FUNC_skb_set_delivery_time: + return &bpf_skb_set_delivery_time_proto; #endif default: return bpf_sk_base_func_proto(func_id); @@ -8088,7 +8127,9 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; break; - case offsetofend(struct __sk_buff, gso_size) ... 
offsetof(struct __sk_buff, hwtstamp) - 1: + case offsetof(struct __sk_buff, delivery_time_type): + return false; + case offsetofend(struct __sk_buff, delivery_time_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: /* Explicitly prohibit access to padding in __sk_buff. */ return false; default: @@ -8443,6 +8484,15 @@ static bool tc_cls_act_is_valid_access(int off, int size, break; case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; + case offsetof(struct __sk_buff, delivery_time_type): + /* The convert_ctx_access() on reading and writing + * __sk_buff->tstamp depends on whether the bpf prog + * has used __sk_buff->delivery_time_type or not. + * Thus, we need to set prog->delivery_time_access + * earlier during is_valid_access() here. + */ + ((struct bpf_prog *)prog)->delivery_time_access = 1; + return size == sizeof(__u8); } return bpf_skb_is_valid_access(off, size, type, prog, info); @@ -8838,6 +8888,45 @@ static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, return insn - insn_buf; } +static struct bpf_insn *bpf_convert_dtime_type_read(const struct bpf_insn *si, + struct bpf_insn *insn) +{ + __u8 value_reg = si->dst_reg; + __u8 skb_reg = si->src_reg; + __u8 tmp_reg = BPF_REG_AX; + + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, + SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); + /* value_reg = BPF_SKB_DELIVERY_TIME_MONO */ + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_MONO); + *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 10 : 5); + + *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, skb_reg, + offsetof(struct sk_buff, tstamp)); + *insn++ = BPF_JMP_IMM(BPF_JNE, tmp_reg, 0, 2); + /* value_reg = BPF_SKB_DELIVERY_TIME_NONE */ + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_NONE); + *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 6 : 1); + +#ifdef CONFIG_NET_CLS_ACT + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); + /* At ingress, value_reg = 0 */ + *insn++ = BPF_MOV32_IMM(value_reg, 0); + *insn++ = BPF_JMP_A(1); +#endif + + /* value_reg = BPF_SKB_DELIVERYT_TIME_UNSPEC */ + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_UNSPEC); + + /* 15 insns with CONFIG_NET_CLS_ACT */ + return insn; +} + static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si, struct bpf_insn *insn) { @@ -8859,29 +8948,32 @@ static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si, return insn; } -static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_insn *si, +static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, + const struct bpf_insn *si, struct bpf_insn *insn) { __u8 value_reg = si->dst_reg; __u8 skb_reg = si->src_reg; #ifdef CONFIG_NET_CLS_ACT - __u8 tmp_reg = BPF_REG_AX; - - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 5); - /* @ingress, read __sk_buff->tstamp as the (rcv) timestamp, - * so check the skb->mono_delivery_time. 
- */ - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); - /* skb->mono_delivery_time is set, read 0 as the (rcv) timestamp. */ - *insn++ = BPF_MOV64_IMM(value_reg, 0); - *insn++ = BPF_JMP_A(1); + if (!prog->delivery_time_access) { + __u8 tmp_reg = BPF_REG_AX; + + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 5); + /* @ingress, read __sk_buff->tstamp as the (rcv) timestamp, + * so check the skb->mono_delivery_time. + */ + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, + SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); + /* skb->mono_delivery_time is set, read 0 as the (rcv) timestamp. */ + *insn++ = BPF_MOV64_IMM(value_reg, 0); + *insn++ = BPF_JMP_A(1); + } #endif *insn++ = BPF_LDX_MEM(BPF_DW, value_reg, skb_reg, @@ -8889,27 +8981,30 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_insn *si, return insn; } -static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_insn *si, +static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, + const struct bpf_insn *si, struct bpf_insn *insn) { __u8 value_reg = si->src_reg; __u8 skb_reg = si->dst_reg; #ifdef CONFIG_NET_CLS_ACT - __u8 tmp_reg = BPF_REG_AX; - - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 3); - /* Writing __sk_buff->tstamp at ingress as the (rcv) timestamp. - * Clear the skb->mono_delivery_time. - */ - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - ~SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + if (!prog->delivery_time_access) { + __u8 tmp_reg = BPF_REG_AX; + + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 3); + /* Writing __sk_buff->tstamp at ingress as the (rcv) timestamp. + * Clear the skb->mono_delivery_time. 
+ */ + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, + ~SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + } #endif /* skb->tstamp = tstamp */ @@ -9226,9 +9321,13 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8); if (type == BPF_WRITE) - insn = bpf_convert_tstamp_write(si, insn); + insn = bpf_convert_tstamp_write(prog, si, insn); else - insn = bpf_convert_tstamp_read(si, insn); + insn = bpf_convert_tstamp_read(prog, si, insn); + break; + + case offsetof(struct __sk_buff, delivery_time_type): + insn = bpf_convert_dtime_type_read(si, insn); break; case offsetof(struct __sk_buff, gso_segs): diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index afe3d0d7f5f2..4eebea830613 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5086,6 +5086,37 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. On error * *dst* buffer is zeroed out. + * + * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type) + * Description + * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also + * change the __sk_buff->delivery_time_type to *dtime_type*. + * + * When setting a delivery time (non zero *dtime*) to + * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* + * is supported. It is the only delivery_time_type that will be + * kept after bpf_redirect_*(). + * + * If there is no need to change the __sk_buff->delivery_time_type, + * the delivery time can be directly written to __sk_buff->tstamp + * instead. + * + * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE + * can be used to clear any delivery time stored in + * __sk_buff->tstamp. + * + * Only IPv4 and IPv6 skb->protocol are supported. + * + * This function is most useful when it needs to set a + * mono delivery time to __sk_buff->tstamp and then + * bpf_redirect_*() to the egress of an iface. For example, + * changing the (rcv) timestamp in __sk_buff->tstamp at + * ingress to a mono delivery time and then bpf_redirect_*() + * to sch_fq@phy-dev. + * Return + * 0 on success. + * **-EINVAL** for invalid input + * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5280,6 +5311,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ + FN(skb_set_delivery_time), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5469,6 +5501,12 @@ union { \ __u64 :64; \ } __attribute__((aligned(8))) +enum { + BPF_SKB_DELIVERY_TIME_NONE, + BPF_SKB_DELIVERY_TIME_UNSPEC, + BPF_SKB_DELIVERY_TIME_MONO, +}; + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ @@ -5509,7 +5547,8 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u32 :32; /* Padding, future use. */ + __u8 delivery_time_type; + __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; -- cgit v1.2.3-71-gd317 From 5c3f1f9cc4cbbf491233982b5975ae2d284de5df Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Feb 2022 15:31:35 +1100 Subject: mm: remove the __KERNEL__ guard from __KERNEL__ ifdefs don't make sense outside of include/uapi/. 
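[ Editor's aside: for contrast, a sketch of where the guard does belong. In an exported include/uapi/ header, the __KERNEL__ ifdef hides kernel-only declarations from the sanitized copy installed for userspace, which is exactly the property a kernel-internal header such as include/linux/mm.h never needs. The header below is hypothetical:

/* include/uapi/linux/example.h -- hypothetical uapi header */
struct example_arg {
	__u32 flags;	/* shared kernel/userspace ABI */
};

#ifdef __KERNEL__
/* kernel-only declarations, stripped by "make headers_install" */
int example_validate(const struct example_arg *arg);
#endif
]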
Link: https://lkml.kernel.org/r/20220210072828.2930359-3-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Chaitanya Kulkarni Reviewed-by: Muchun Song Reviewed-by: Dan Williams Tested-by: "Sierra Guiza, Alejandro (Alex)" Cc: Alex Deucher Cc: Alistair Popple Cc: Ben Skeggs Cc: Christian Knig Cc: Felix Kuehling Cc: Karol Herbst Cc: Lyude Paul Cc: Miaohe Lin Cc: "Pan, Xinhui" Cc: Ralph Campbell Signed-off-by: Andrew Morton Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/mm.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 74ee50c2033b..2cca8cd30186 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3,9 +3,6 @@ #define _LINUX_MM_H #include - -#ifdef __KERNEL__ - #include #include #include @@ -3379,5 +3376,4 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start, } #endif -#endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ -- cgit v1.2.3-71-gd317 From 730ff52194cdb324b7680e5054c546f7b52de8a2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Feb 2022 15:31:35 +1100 Subject: mm: remove pointless includes from hmm.h pulls in the world for no good reason at all. Remove the includes and push a few ones into the users instead. Link: https://lkml.kernel.org/r/20220210072828.2930359-4-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Chaitanya Kulkarni Reviewed-by: Muchun Song Tested-by: "Sierra Guiza, Alejandro (Alex)" Cc: Alex Deucher Cc: Alistair Popple Cc: Ben Skeggs Cc: Christian Knig Cc: Dan Williams Cc: Felix Kuehling Cc: Karol Herbst Cc: Lyude Paul Cc: Miaohe Lin Cc: "Pan, Xinhui" Cc: Ralph Campbell Signed-off-by: Andrew Morton Signed-off-by: Matthew Wilcox (Oracle) --- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 1 + drivers/gpu/drm/nouveau/nouveau_dmem.c | 1 + include/linux/hmm.h | 9 ++------- lib/test_hmm.c | 2 ++ 4 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index ed5385137f48..cb835f95a76e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "amdgpu_sync.h" #include "amdgpu_object.h" #include "amdgpu_vm.h" diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 3828aafd3ac4..e886a3b9e08c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -39,6 +39,7 @@ #include #include +#include /* * FIXME: this is ugly right now we are using TTM to allocate vram and we pin diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 2fd2e91d5107..d5a6f101f843 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -9,14 +9,9 @@ #ifndef LINUX_HMM_H #define LINUX_HMM_H -#include -#include +#include -#include -#include -#include -#include -#include +struct mmu_interval_notifier; /* * On output: diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 767538089a62..396beee6b061 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include "test_hmm_uapi.h" -- cgit v1.2.3-71-gd317 From 75e55d8a107edb2fd6e02b1fa8a81531209cda04 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Feb 2022 15:31:35 +1100 Subject: mm: move free_devmap_managed_page to memremap.c 
free_devmap_managed_page has nothing to do with the code in swap.c, move it to live with the rest of the code for devmap handling. Link: https://lkml.kernel.org/r/20220210072828.2930359-5-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Chaitanya Kulkarni Reviewed-by: Muchun Song Reviewed-by: Dan Williams Tested-by: "Sierra Guiza, Alejandro (Alex)" Cc: Alex Deucher Cc: Alistair Popple Cc: Ben Skeggs Cc: Christian Knig Cc: Felix Kuehling Cc: Karol Herbst Cc: Lyude Paul Cc: Miaohe Lin Cc: "Pan, Xinhui" Cc: Ralph Campbell Signed-off-by: Andrew Morton Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/mm.h | 1 - mm/memremap.c | 21 +++++++++++++++++++++ mm/swap.c | 23 ----------------------- 3 files changed, 21 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 2cca8cd30186..a9d6473fc045 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1092,7 +1092,6 @@ static inline bool is_zone_movable_page(const struct page *page) } #ifdef CONFIG_DEV_PAGEMAP_OPS -void free_devmap_managed_page(struct page *page); DECLARE_STATIC_KEY_FALSE(devmap_managed_key); static inline bool page_is_devmap_managed(struct page *page) diff --git a/mm/memremap.c b/mm/memremap.c index 5f04a0709e43..55d23e9f5c04 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -501,4 +501,25 @@ void free_devmap_managed_page(struct page *page) page->mapping = NULL; page->pgmap->ops->page_free(page); } + +void put_devmap_managed_page(struct page *page) +{ + int count; + + if (WARN_ON_ONCE(!page_is_devmap_managed(page))) + return; + + count = page_ref_dec_return(page); + + /* + * devmap page refcounts are 1-based, rather than 0-based: if + * refcount is 1, then the page is free and the refcount is + * stable because nobody holds a reference on the page. + */ + if (count == 1) + free_devmap_managed_page(page); + else if (!count) + __put_page(page); +} +EXPORT_SYMBOL(put_devmap_managed_page); #endif /* CONFIG_DEV_PAGEMAP_OPS */ diff --git a/mm/swap.c b/mm/swap.c index 842d5cd92cf6..e499df864ef7 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1154,26 +1154,3 @@ void __init swap_setup(void) * _really_ don't want to cluster much more */ } - -#ifdef CONFIG_DEV_PAGEMAP_OPS -void put_devmap_managed_page(struct page *page) -{ - int count; - - if (WARN_ON_ONCE(!page_is_devmap_managed(page))) - return; - - count = page_ref_dec_return(page); - - /* - * devmap page refcounts are 1-based, rather than 0-based: if - * refcount is 1, then the page is free and the refcount is - * stable because nobody holds a reference on the page. - */ - if (count == 1) - free_devmap_managed_page(page); - else if (!count) - __put_page(page); -} -EXPORT_SYMBOL(put_devmap_managed_page); -#endif -- cgit v1.2.3-71-gd317 From 895749455f6054e0c7b40a6ec449a3ab6db51bdd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Feb 2022 15:31:35 +1100 Subject: mm: simplify freeing of devmap managed pages Make put_devmap_managed_page return if it took charge of the page or not and remove the separate page_is_devmap_managed helper. 
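[ Editor's aside: the caller pattern this change enables, distilled from the put_page() hunk in the diff below; a simplified sketch, not new code:

static inline void put_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* The helper returns true only if it took charge of a devmap
	 * managed page; all other pages fall through to the normal
	 * folio_put().
	 */
	if (put_devmap_managed_page(&folio->page))
		return;
	folio_put(folio);
}
]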
Link: https://lkml.kernel.org/r/20220210072828.2930359-6-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Chaitanya Kulkarni Reviewed-by: Dan Williams Tested-by: "Sierra Guiza, Alejandro (Alex)" Cc: Alex Deucher Cc: Alistair Popple Cc: Ben Skeggs Cc: Christian Knig Cc: Felix Kuehling Cc: Karol Herbst Cc: Lyude Paul Cc: Miaohe Lin Cc: Muchun Song Cc: "Pan, Xinhui" Cc: Ralph Campbell Signed-off-by: Andrew Morton Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/mm.h | 34 ++++++++++------------------------ mm/memremap.c | 20 +++++++++----------- mm/swap.c | 10 +--------- 3 files changed, 20 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index a9d6473fc045..8a59f0456149 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1094,33 +1094,24 @@ static inline bool is_zone_movable_page(const struct page *page) #ifdef CONFIG_DEV_PAGEMAP_OPS DECLARE_STATIC_KEY_FALSE(devmap_managed_key); -static inline bool page_is_devmap_managed(struct page *page) +bool __put_devmap_managed_page(struct page *page); +static inline bool put_devmap_managed_page(struct page *page) { if (!static_branch_unlikely(&devmap_managed_key)) return false; if (!is_zone_device_page(page)) return false; - switch (page->pgmap->type) { - case MEMORY_DEVICE_PRIVATE: - case MEMORY_DEVICE_FS_DAX: - return true; - default: - break; - } - return false; + if (page->pgmap->type != MEMORY_DEVICE_PRIVATE && + page->pgmap->type != MEMORY_DEVICE_FS_DAX) + return false; + return __put_devmap_managed_page(page); } -void put_devmap_managed_page(struct page *page); - #else /* CONFIG_DEV_PAGEMAP_OPS */ -static inline bool page_is_devmap_managed(struct page *page) +static inline bool put_devmap_managed_page(struct page *page) { return false; } - -static inline void put_devmap_managed_page(struct page *page) -{ -} #endif /* CONFIG_DEV_PAGEMAP_OPS */ static inline bool is_device_private_page(const struct page *page) @@ -1220,16 +1211,11 @@ static inline void put_page(struct page *page) struct folio *folio = page_folio(page); /* - * For devmap managed pages we need to catch refcount transition from - * 2 to 1, when refcount reach one it means the page is free and we - * need to inform the device driver through callback. See - * include/linux/memremap.h and HMM for details. + * For some devmap managed pages we need to catch refcount transition + * from 2 to 1: */ - if (page_is_devmap_managed(&folio->page)) { - put_devmap_managed_page(&folio->page); + if (put_devmap_managed_page(&folio->page)) return; - } - folio_put(folio); } diff --git a/mm/memremap.c b/mm/memremap.c index 55d23e9f5c04..f41233a67edb 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -502,24 +502,22 @@ void free_devmap_managed_page(struct page *page) page->pgmap->ops->page_free(page); } -void put_devmap_managed_page(struct page *page) +bool __put_devmap_managed_page(struct page *page) { - int count; - - if (WARN_ON_ONCE(!page_is_devmap_managed(page))) - return; - - count = page_ref_dec_return(page); - /* * devmap page refcounts are 1-based, rather than 0-based: if * refcount is 1, then the page is free and the refcount is * stable because nobody holds a reference on the page. 
*/ - if (count == 1) + switch (page_ref_dec_return(page)) { + case 1: free_devmap_managed_page(page); - else if (!count) + break; + case 0: __put_page(page); + break; + } + return true; } -EXPORT_SYMBOL(put_devmap_managed_page); +EXPORT_SYMBOL(__put_devmap_managed_page); #endif /* CONFIG_DEV_PAGEMAP_OPS */ diff --git a/mm/swap.c b/mm/swap.c index e499df864ef7..db8d0eea13d7 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -930,16 +930,8 @@ void release_pages(struct page **pages, int nr) unlock_page_lruvec_irqrestore(lruvec, flags); lruvec = NULL; } - /* - * ZONE_DEVICE pages that return 'false' from - * page_is_devmap_managed() do not require special - * processing, and instead, expect a call to - * put_page_testzero(). - */ - if (page_is_devmap_managed(page)) { - put_devmap_managed_page(page); + if (put_devmap_managed_page(page)) continue; - } if (put_page_testzero(page)) put_dev_pagemap(page->pgmap); continue; -- cgit v1.2.3-71-gd317 From dc90f0846df4870b6cc8528c31e5c60f18fb68be Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Feb 2022 15:31:36 +1100 Subject: mm: don't include in Move the check for the actual pgmap types that need the free at refcount one behavior into the out of line helper, and thus avoid the need to pull memremap.h into mm.h. Link: https://lkml.kernel.org/r/20220210072828.2930359-7-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Dan Williams Acked-by: Felix Kuehling Tested-by: "Sierra Guiza, Alejandro (Alex)" Cc: Alex Deucher Cc: Alistair Popple Cc: Ben Skeggs Cc: Chaitanya Kulkarni Cc: Karol Herbst Cc: Lyude Paul Cc: Miaohe Lin Cc: Muchun Song Cc: "Pan, Xinhui" Cc: Ralph Campbell Signed-off-by: Andrew Morton Signed-off-by: Matthew Wilcox (Oracle) --- arch/arm64/mm/mmu.c | 1 + arch/powerpc/kvm/book3s_hv_uvmem.c | 1 + arch/powerpc/mm/book3s64/pgtable.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 + drivers/gpu/drm/drm_cache.c | 2 +- drivers/gpu/drm/nouveau/nouveau_dmem.c | 1 + drivers/gpu/drm/nouveau/nouveau_svm.c | 1 + drivers/infiniband/core/rw.c | 1 + drivers/nvdimm/pmem.h | 1 + drivers/nvme/host/pci.c | 1 + drivers/nvme/target/io-cmd-bdev.c | 1 + fs/fuse/virtio_fs.c | 1 + fs/proc/page.c | 1 + include/linux/memremap.h | 18 ++++++++++++++++++ include/linux/mm.h | 20 -------------------- lib/test_hmm.c | 1 + mm/memcontrol.c | 1 + mm/memremap.c | 6 +++++- 18 files changed, 38 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index acfae9b41cc8..580abae6c0b9 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index e414ca44839f..881951604227 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -91,6 +91,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 79ce3c22a29d..052e6590f84f 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index ea68f3b3a4e9..6d643b4b791d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -25,6 +25,7 @@ #include #include +#include #include #include 
#include diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index f19d9acbe959..50b8a088f763 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -27,11 +27,11 @@ /* * Authors: Thomas Hellström */ - #include #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index e886a3b9e08c..a5cdfbe32b5e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -39,6 +39,7 @@ #include #include +#include #include /* diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 266809e511e2..090b9b47708c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -35,6 +35,7 @@ #include #include #include +#include #include struct nouveau_svm { diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 5a3bd41b331c..4d98f931a13d 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -2,6 +2,7 @@ /* * Copyright (c) 2016 HGST, a Western Digital Company. */ +#include #include #include #include diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h index 59cfe13ea8a8..1f51a2361429 100644 --- a/drivers/nvdimm/pmem.h +++ b/drivers/nvdimm/pmem.h @@ -3,6 +3,7 @@ #define __NVDIMM_PMEM_H__ #include #include +#include #include #include #include diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 6a99ed680915..ab15bc72710d 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 70ca9dfc1771..a141446db1be 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include +#include #include #include "nvmet.h" diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 9d737904d07c..86b7dbb6a0d4 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/proc/page.c b/fs/proc/page.c index 9f1077d94cde..a2873a617ae8 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 1fafcc38acba..514ab46f597e 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MEMREMAP_H_ #define _LINUX_MEMREMAP_H_ + +#include #include #include #include @@ -129,6 +131,22 @@ static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap) return 1 << pgmap->vmemmap_shift; } +static inline bool is_device_private_page(const struct page *page) +{ + return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && + IS_ENABLED(CONFIG_DEVICE_PRIVATE) && + is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PRIVATE; +} + +static inline bool is_pci_p2pdma_page(const struct page *page) +{ + return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && + IS_ENABLED(CONFIG_PCI_P2PDMA) && + is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; +} + #ifdef CONFIG_ZONE_DEVICE void *memremap_pages(struct dev_pagemap *pgmap, int nid); void memunmap_pages(struct dev_pagemap *pgmap); diff --git a/include/linux/mm.h b/include/linux/mm.h index 
8a59f0456149..cb8bee88e70c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -1101,9 +1100,6 @@ static inline bool put_devmap_managed_page(struct page *page) return false; if (!is_zone_device_page(page)) return false; - if (page->pgmap->type != MEMORY_DEVICE_PRIVATE && - page->pgmap->type != MEMORY_DEVICE_FS_DAX) - return false; return __put_devmap_managed_page(page); } @@ -1114,22 +1110,6 @@ static inline bool put_devmap_managed_page(struct page *page) } #endif /* CONFIG_DEV_PAGEMAP_OPS */ -static inline bool is_device_private_page(const struct page *page) -{ - return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && - IS_ENABLED(CONFIG_DEVICE_PRIVATE) && - is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PRIVATE; -} - -static inline bool is_pci_p2pdma_page(const struct page *page) -{ - return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && - IS_ENABLED(CONFIG_PCI_P2PDMA) && - is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; -} - /* 127: arbitrary random number, small enough to assemble well */ #define folio_ref_zero_or_close_to_overflow(folio) \ ((unsigned int) folio_ref_count(folio) + 127u <= 127u) diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 396beee6b061..e5fc14ba71f3 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c78b9d3b9c04..2c5032294c9f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include diff --git a/mm/memremap.c b/mm/memremap.c index f41233a67edb..a0ece2344c2c 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include #include @@ -504,6 +504,10 @@ void free_devmap_managed_page(struct page *page) bool __put_devmap_managed_page(struct page *page) { + if (page->pgmap->type != MEMORY_DEVICE_PRIVATE && + page->pgmap->type != MEMORY_DEVICE_FS_DAX) + return false; + /* * devmap page refcounts are 1-based, rather than 0-based: if * refcount is 1, then the page is free and the refcount is -- cgit v1.2.3-71-gd317 From 27674ef6c73f0c9096a9827dc5d6ba9fc7808422 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Feb 2022 15:31:36 +1100 Subject: mm: remove the extra ZONE_DEVICE struct page refcount ZONE_DEVICE struct pages have an extra reference count that complicates the code for put_page() and several places in the kernel that need to check the reference count to see that a page is not being used (gup, compaction, migration, etc.). Clean up the code so the reference count doesn't need to be treated specially for ZONE_DEVICE pages. Note that this excludes the special idle page wakeup for fsdax pages, which still happens at refcount 1. This is a separate issue and will be sorted out later. Given that only fsdax pages require the notification when the refcount hits 1 now, the PAGEMAP_OPS Kconfig symbol can go away and be replaced with a FS_DAX check for this hook in the put_page fastpath. Based on an earlier patch from Ralph Campbell .
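[ Editor's aside: the driver-visible effect, sketched from the kvmppc_uvmem_get_page() hunk below; the flow summary is editorial, while the identifiers are taken from that diff:

/* A ZONE_DEVICE page is now handed out at refcount 1, so the extra
 * get_page() that drivers used to take goes away:
 */
dpage = pfn_to_page(uvmem_pfn);
dpage->zone_device_data = pvt;
lock_page(dpage);	/* was: get_page(dpage); lock_page(dpage); */

/* ... and the final put_page() now drops the count to 0, which ends up
 * in free_zone_device_page() -> pgmap->ops->page_free(), after which
 * the core resets the count to 1 so the page can be handed out again.
 */
]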
Link: https://lkml.kernel.org/r/20220210072828.2930359-8-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Logan Gunthorpe Reviewed-by: Ralph Campbell Reviewed-by: Jason Gunthorpe Reviewed-by: Dan Williams Acked-by: Felix Kuehling Tested-by: "Sierra Guiza, Alejandro (Alex)" Cc: Alex Deucher Cc: Alistair Popple Cc: Ben Skeggs Cc: Chaitanya Kulkarni Cc: Christian Knig Cc: Karol Herbst Cc: Lyude Paul Cc: Miaohe Lin Cc: Muchun Song Cc: "Pan, Xinhui" Signed-off-by: Andrew Morton Signed-off-by: Matthew Wilcox (Oracle) --- arch/powerpc/kvm/book3s_hv_uvmem.c | 1 - drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 1 - drivers/gpu/drm/nouveau/nouveau_dmem.c | 1 - fs/Kconfig | 1 - include/linux/memremap.h | 12 +++---- include/linux/mm.h | 6 ++-- lib/test_hmm.c | 1 - mm/Kconfig | 4 --- mm/internal.h | 2 ++ mm/memcontrol.c | 11 ++---- mm/memremap.c | 57 +++++++++++--------------------- mm/migrate.c | 6 ---- mm/swap.c | 16 +++------ 13 files changed, 36 insertions(+), 83 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 881951604227..8cabdb39cbbc 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -713,7 +713,6 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) dpage = pfn_to_page(uvmem_pfn); dpage->zone_device_data = pvt; - get_page(dpage); lock_page(dpage); return dpage; out_clear: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index cb835f95a76e..e27ca3758762 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -225,7 +225,6 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn) page = pfn_to_page(pfn); svm_range_bo_ref(prange->svm_bo); page->zone_device_data = prange->svm_bo; - get_page(page); lock_page(page); } diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index a5cdfbe32b5e..7ba66ad68a8a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -326,7 +326,6 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm) return NULL; } - get_page(page); lock_page(page); return page; } diff --git a/fs/Kconfig b/fs/Kconfig index 6c7dc1387beb..e9433bbc4801 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -48,7 +48,6 @@ config FS_DAX bool "File system based Direct Access (DAX) support" depends on MMU depends on !(ARM || MIPS || SPARC) - select DEV_PAGEMAP_OPS if (ZONE_DEVICE && !FS_DAX_LIMITED) select FS_IOMAP select DAX help diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 514ab46f597e..d6a114dd5ea8 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -68,9 +68,9 @@ enum memory_type { struct dev_pagemap_ops { /* - * Called once the page refcount reaches 1. (ZONE_DEVICE pages never - * reach 0 refcount unless there is a refcount bug. This allows the - * device driver to implement its own memory management.) + * Called once the page refcount reaches 0. The reference count will be + * reset to one by the core code after the method is called to prepare + * for handing out the page again. 
*/ void (*page_free)(struct page *page); @@ -133,16 +133,14 @@ static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap) static inline bool is_device_private_page(const struct page *page) { - return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && - IS_ENABLED(CONFIG_DEVICE_PRIVATE) && + return IS_ENABLED(CONFIG_DEVICE_PRIVATE) && is_zone_device_page(page) && page->pgmap->type == MEMORY_DEVICE_PRIVATE; } static inline bool is_pci_p2pdma_page(const struct page *page) { - return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && - IS_ENABLED(CONFIG_PCI_P2PDMA) && + return IS_ENABLED(CONFIG_PCI_P2PDMA) && is_zone_device_page(page) && page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; } diff --git a/include/linux/mm.h b/include/linux/mm.h index cb8bee88e70c..0201d258c646 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1090,7 +1090,7 @@ static inline bool is_zone_movable_page(const struct page *page) return page_zonenum(page) == ZONE_MOVABLE; } -#ifdef CONFIG_DEV_PAGEMAP_OPS +#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) DECLARE_STATIC_KEY_FALSE(devmap_managed_key); bool __put_devmap_managed_page(struct page *page); @@ -1103,12 +1103,12 @@ static inline bool put_devmap_managed_page(struct page *page) return __put_devmap_managed_page(page); } -#else /* CONFIG_DEV_PAGEMAP_OPS */ +#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ static inline bool put_devmap_managed_page(struct page *page) { return false; } -#endif /* CONFIG_DEV_PAGEMAP_OPS */ +#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ /* 127: arbitrary random number, small enough to assemble well */ #define folio_ref_zero_or_close_to_overflow(folio) \ diff --git a/lib/test_hmm.c b/lib/test_hmm.c index e5fc14ba71f3..cfe632047839 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -566,7 +566,6 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) } dpage->zone_device_data = rpage; - get_page(dpage); lock_page(dpage); return dpage; diff --git a/mm/Kconfig b/mm/Kconfig index 3326ee3903f3..a1901ae6d062 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -776,9 +776,6 @@ config ZONE_DEVICE If FS_DAX is enabled, then say Y. -config DEV_PAGEMAP_OPS - bool - # # Helpers to mirror range of the CPU page tables of a process into device page # tables. @@ -790,7 +787,6 @@ config HMM_MIRROR config DEVICE_PRIVATE bool "Unaddressable device memory (GPU memory, ...)" depends on ZONE_DEVICE - select DEV_PAGEMAP_OPS help Allows creation of struct pages to represent unaddressable device diff --git a/mm/internal.h b/mm/internal.h index 450a2c8a43f3..3756dd5d2c92 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -735,4 +735,6 @@ void vunmap_range_noflush(unsigned long start, unsigned long end); int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags); +void free_zone_device_page(struct page *page); + #endif /* __MM_INTERNAL_H */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2c5032294c9f..8fef072dc1ce 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5503,17 +5503,12 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, return NULL; /* - * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to - * a device and because they are not accessible by CPU they are store - * as special swap entry in the CPU page table. + * Handle device private pages that are not accessible by the CPU, but + * stored as special swap entries in the page table. 
*/ if (is_device_private_entry(ent)) { page = pfn_swap_entry_to_page(ent); - /* - * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have - * a refcount of 1 when free (unlike normal page) - */ - if (!page_ref_add_unless(page, 1, 1)) + if (!get_page_unless_zero(page)) return NULL; return page; } diff --git a/mm/memremap.c b/mm/memremap.c index a0ece2344c2c..fef5734d5e49 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -12,6 +12,7 @@ #include #include #include +#include "internal.h" static DEFINE_XARRAY(pgmap_array); @@ -37,21 +38,19 @@ unsigned long memremap_compat_align(void) EXPORT_SYMBOL_GPL(memremap_compat_align); #endif -#ifdef CONFIG_DEV_PAGEMAP_OPS +#ifdef CONFIG_FS_DAX DEFINE_STATIC_KEY_FALSE(devmap_managed_key); EXPORT_SYMBOL(devmap_managed_key); static void devmap_managed_enable_put(struct dev_pagemap *pgmap) { - if (pgmap->type == MEMORY_DEVICE_PRIVATE || - pgmap->type == MEMORY_DEVICE_FS_DAX) + if (pgmap->type == MEMORY_DEVICE_FS_DAX) static_branch_dec(&devmap_managed_key); } static void devmap_managed_enable_get(struct dev_pagemap *pgmap) { - if (pgmap->type == MEMORY_DEVICE_PRIVATE || - pgmap->type == MEMORY_DEVICE_FS_DAX) + if (pgmap->type == MEMORY_DEVICE_FS_DAX) static_branch_inc(&devmap_managed_key); } #else @@ -61,7 +60,7 @@ static void devmap_managed_enable_get(struct dev_pagemap *pgmap) static void devmap_managed_enable_put(struct dev_pagemap *pgmap) { } -#endif /* CONFIG_DEV_PAGEMAP_OPS */ +#endif /* CONFIG_FS_DAX */ static void pgmap_array_delete(struct range *range) { @@ -102,23 +101,12 @@ static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id) return (range->start + range_len(range)) >> PAGE_SHIFT; } -static unsigned long pfn_next(struct dev_pagemap *pgmap, unsigned long pfn) -{ - if (pfn % (1024 << pgmap->vmemmap_shift)) - cond_resched(); - return pfn + pgmap_vmemmap_nr(pgmap); -} - static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id) { return (pfn_end(pgmap, range_id) - pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift; } -#define for_each_device_pfn(pfn, map, i) \ - for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); \ - pfn = pfn_next(map, pfn)) - static void pageunmap_range(struct dev_pagemap *pgmap, int range_id) { struct range *range = &pgmap->ranges[range_id]; @@ -147,13 +135,11 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id) void memunmap_pages(struct dev_pagemap *pgmap) { - unsigned long pfn; int i; percpu_ref_kill(&pgmap->ref); for (i = 0; i < pgmap->nr_range; i++) - for_each_device_pfn(pfn, pgmap, i) - put_page(pfn_to_page(pfn)); + percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i)); wait_for_completion(&pgmap->done); percpu_ref_exit(&pgmap->ref); @@ -464,14 +450,10 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn, } EXPORT_SYMBOL_GPL(get_dev_pagemap); -#ifdef CONFIG_DEV_PAGEMAP_OPS -void free_devmap_managed_page(struct page *page) +void free_zone_device_page(struct page *page) { - /* notify page idle for dax */ - if (!is_device_private_page(page)) { - wake_up_var(&page->_refcount); + if (WARN_ON_ONCE(!is_device_private_page(page))) return; - } __ClearPageWaiters(page); @@ -500,28 +482,27 @@ void free_devmap_managed_page(struct page *page) */ page->mapping = NULL; page->pgmap->ops->page_free(page); + + /* + * Reset the page count to 1 to prepare for handing out the page again. 
+ */ + set_page_count(page, 1); } +#ifdef CONFIG_FS_DAX bool __put_devmap_managed_page(struct page *page) { - if (page->pgmap->type != MEMORY_DEVICE_PRIVATE && - page->pgmap->type != MEMORY_DEVICE_FS_DAX) + if (page->pgmap->type != MEMORY_DEVICE_FS_DAX) return false; /* - * devmap page refcounts are 1-based, rather than 0-based: if + * fsdax page refcounts are 1-based, rather than 0-based: if * refcount is 1, then the page is free and the refcount is * stable because nobody holds a reference on the page. */ - switch (page_ref_dec_return(page)) { - case 1: - free_devmap_managed_page(page); - break; - case 0: - __put_page(page); - break; - } + if (page_ref_dec_return(page) == 1) + wake_up_var(&page->_refcount); return true; } EXPORT_SYMBOL(__put_devmap_managed_page); -#endif /* CONFIG_DEV_PAGEMAP_OPS */ +#endif /* CONFIG_FS_DAX */ diff --git a/mm/migrate.c b/mm/migrate.c index e7d0b68d5dcb..af0534de618a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -338,14 +338,8 @@ static int expected_page_refs(struct address_space *mapping, struct page *page) { int expected_count = 1; - /* - * Device private pages have an extra refcount as they are - * ZONE_DEVICE pages. - */ - expected_count += is_device_private_page(page); if (mapping) expected_count += compound_nr(page) + page_has_private(page); - return expected_count; } diff --git a/mm/swap.c b/mm/swap.c index db8d0eea13d7..fc3b7989f5b2 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -122,17 +122,9 @@ static void __put_compound_page(struct page *page) void __put_page(struct page *page) { - if (is_zone_device_page(page)) { - put_dev_pagemap(page->pgmap); - - /* - * The page belongs to the device that created pgmap. Do - * not return it to page allocator. - */ - return; - } - - if (unlikely(PageCompound(page))) + if (unlikely(is_zone_device_page(page))) + free_zone_device_page(page); + else if (unlikely(PageCompound(page))) __put_compound_page(page); else __put_single_page(page); @@ -933,7 +925,7 @@ void release_pages(struct page **pages, int nr) if (put_devmap_managed_page(page)) continue; if (put_page_testzero(page)) - put_dev_pagemap(page->pgmap); + free_zone_device_page(page); continue; } -- cgit v1.2.3-71-gd317 From dc4e8c07e9e2f69387579c49caca26ba239f7270 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Sun, 27 Feb 2022 20:25:45 +0800 Subject: ACPI: APEI: explicit init of HEST and GHES in acpi_init() Since commit e147133a42cb ("ACPI / APEI: Make hest.c manage the estatus memory pool") was merged, ghes_init() has relied on acpi_hest_init() to manage the estatus memory pool. On the other hand, ghes_init() relies on sdei_init() to detect the SDEI version and (un)register events. The dependencies are as follows: ghes_init() => acpi_hest_init() => acpi_bus_init() => acpi_init() ghes_init() => sdei_init() HEST is not PCI-specific and initcall ordering is implicit and not well-defined within a level. Based on the above, remove acpi_hest_init() from acpi_pci_root_init() and convert ghes_init() and sdei_init() from initcalls to explicit calls in the following order: acpi_hest_init() ghes_init() sdei_init() Signed-off-by: Shuai Xue Signed-off-by: Rafael J.
Wysocki --- drivers/acpi/apei/ghes.c | 19 ++++++++----------- drivers/acpi/bus.c | 2 ++ drivers/acpi/pci_root.c | 3 --- drivers/firmware/Kconfig | 1 + drivers/firmware/arm_sdei.c | 13 ++----------- include/acpi/apei.h | 4 +++- include/linux/arm_sdei.h | 2 ++ 7 files changed, 18 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 0c5c9acc6254..aadc0a972f18 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1457,33 +1457,35 @@ static struct platform_driver ghes_platform_driver = { .remove = ghes_remove, }; -static int __init ghes_init(void) +void __init ghes_init(void) { int rc; + sdei_init(); + if (acpi_disabled) - return -ENODEV; + return; switch (hest_disable) { case HEST_NOT_FOUND: - return -ENODEV; + return; case HEST_DISABLED: pr_info(GHES_PFX "HEST is not enabled!\n"); - return -EINVAL; + return; default: break; } if (ghes_disable) { pr_info(GHES_PFX "GHES is not enabled!\n"); - return -EINVAL; + return; } ghes_nmi_init_cxt(); rc = platform_driver_register(&ghes_platform_driver); if (rc) - goto err; + return; rc = apei_osc_setup(); if (rc == 0 && osc_sb_apei_support_acked) @@ -1494,9 +1496,4 @@ static int __init ghes_init(void) pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n"); else pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n"); - - return 0; -err: - return rc; } -device_initcall(ghes_init); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 07f604832fd6..3f403db20f69 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1331,6 +1331,8 @@ static int __init acpi_init(void) pci_mmcfg_late_init(); acpi_iort_init(); + acpi_hest_init(); + ghes_init(); acpi_scan_init(); acpi_ec_init(); acpi_debugfs_init(); diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index b76db99cced3..6f9e75d14808 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -22,8 +22,6 @@ #include #include #include -#include /* for acpi_hest_init() */ - #include "internal.h" #define ACPI_PCI_ROOT_CLASS "pci_bridge" @@ -943,7 +941,6 @@ out_release_info: void __init acpi_pci_root_init(void) { - acpi_hest_init(); if (acpi_pci_disabled) return; diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 75cb91055c17..e5cfb01353d8 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -40,6 +40,7 @@ config ARM_SCPI_POWER_DOMAIN config ARM_SDE_INTERFACE bool "ARM Software Delegated Exception Interface (SDEI)" depends on ARM64 + depends on ACPI_APEI_GHES help The Software Delegated Exception Interface (SDEI) is an ARM standard for registering callbacks from the platform firmware diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index a7e762c352f9..1e1a51510e83 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -1059,14 +1059,14 @@ static bool __init sdei_present_acpi(void) return true; } -static int __init sdei_init(void) +void __init sdei_init(void) { struct platform_device *pdev; int ret; ret = platform_driver_register(&sdei_driver); if (ret || !sdei_present_acpi()) - return ret; + return; pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL, 0); @@ -1076,17 +1076,8 @@ static int __init sdei_init(void) pr_info("Failed to register ACPI:SDEI platform device %d\n", ret); } - - return ret; } -/* - * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register - * its events. 
ACPI is initialised from a subsys_initcall(), GHES is initialised - by device_initcall(). We want to be called in the middle. - */ -subsys_initcall_sync(sdei_init); - int sdei_event_handler(struct pt_regs *regs, struct sdei_registered_event *arg) { diff --git a/include/acpi/apei.h b/include/acpi/apei.h index ece0a8af2bae..4e60dd73c3bb 100644 --- a/include/acpi/apei.h +++ b/include/acpi/apei.h @@ -27,14 +27,16 @@ extern int hest_disable; extern int erst_disable; #ifdef CONFIG_ACPI_APEI_GHES extern bool ghes_disable; +void __init ghes_init(void); #else #define ghes_disable 1 +static inline void ghes_init(void) { } #endif #ifdef CONFIG_ACPI_APEI void __init acpi_hest_init(void); #else -static inline void acpi_hest_init(void) { return; } +static inline void acpi_hest_init(void) { } #endif int erst_write(const struct cper_record_header *record); diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 0a241c5c911d..14dc461b0e82 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -46,9 +46,11 @@ int sdei_unregister_ghes(struct ghes *ghes); /* For use by arch code when CPU hotplug notifiers are not appropriate. */ int sdei_mask_local_cpu(void); int sdei_unmask_local_cpu(void); +void __init sdei_init(void); #else static inline int sdei_mask_local_cpu(void) { return 0; } static inline int sdei_unmask_local_cpu(void) { return 0; } +static inline void sdei_init(void) { } #endif /* CONFIG_ARM_SDE_INTERFACE */ -- cgit v1.2.3-71-gd317 From 31b9887c7258ca47d9c665a80f19f006c86756b1 Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Mon, 17 Jan 2022 17:48:15 +0000 Subject: i3c: remove i2c board info from i2c_dev_desc I2C board info is only required during adapter setup, so there is no requirement to keep a pointer to it once running. To support dynamic device addition we can't rely on board info - user-space creation through sysfs won't have a boardinfo.
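As a minimal sketch of the resulting pattern (illustrative names, not the patch itself; the actual change is in i3c_master_alloc_i2c_dev() below), copying only the needed fields at allocation time looks like this:

/*
 * Minimal sketch: copy the address and LVR out of the boardinfo at
 * allocation time so the descriptor does not dangle when no boardinfo
 * exists (e.g. for devices created dynamically through sysfs).
 */
struct i2c_desc {
	u16 addr;
	u8 lvr;
};

static struct i2c_desc *i2c_desc_alloc(u16 addr, u8 lvr)
{
	struct i2c_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);

	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->addr = addr;	/* copied, no boardinfo pointer kept */
	desc->lvr = lvr;
	return desc;
}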
Cc: Alexandre Belloni Signed-off-by: Jamie Iles Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20220117174816.1963463-2-quic_jiles@quicinc.com --- drivers/i3c/master.c | 18 ++++++++++-------- include/linux/i3c/master.h | 1 - 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index dfe18dcd008d..9a09109da8cc 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -609,7 +609,7 @@ static void i3c_master_free_i2c_dev(struct i2c_dev_desc *dev) static struct i2c_dev_desc * i3c_master_alloc_i2c_dev(struct i3c_master_controller *master, - const struct i2c_dev_boardinfo *boardinfo) + u16 addr, u8 lvr) { struct i2c_dev_desc *dev; @@ -618,9 +618,8 @@ i3c_master_alloc_i2c_dev(struct i3c_master_controller *master, return ERR_PTR(-ENOMEM); dev->common.master = master; - dev->boardinfo = boardinfo; - dev->addr = boardinfo->base.addr; - dev->lvr = boardinfo->lvr; + dev->addr = addr; + dev->lvr = lvr; return dev; } @@ -694,7 +693,7 @@ i3c_master_find_i2c_dev_by_addr(const struct i3c_master_controller *master, struct i2c_dev_desc *dev; i3c_bus_for_each_i2cdev(&master->bus, dev) { - if (dev->boardinfo->base.addr == addr) + if (dev->addr == addr) return dev; } @@ -1689,7 +1688,9 @@ static int i3c_master_bus_init(struct i3c_master_controller *master) i2cboardinfo->base.addr, I3C_ADDR_SLOT_I2C_DEV); - i2cdev = i3c_master_alloc_i2c_dev(master, i2cboardinfo); + i2cdev = i3c_master_alloc_i2c_dev(master, + i2cboardinfo->base.addr, + i2cboardinfo->lvr); if (IS_ERR(i2cdev)) { ret = PTR_ERR(i2cdev); goto err_detach_devs; } @@ -2175,6 +2176,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master) { struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master); struct i2c_dev_desc *i2cdev; + struct i2c_dev_boardinfo *i2cboardinfo; int ret; adap->dev.parent = master->dev.parent; @@ -2194,8 +2196,8 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master) * We silently ignore failures here. The bus should keep working * correctly even if one or more i2c devices are not registered. */ - i3c_bus_for_each_i2cdev(&master->bus, i2cdev) - i2cdev->dev = i2c_new_client_device(adap, &i2cdev->boardinfo->base); + list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) + i2cdev->dev = i2c_new_client_device(adap, &i2cboardinfo->base); return 0; } diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index 9cb39d901cd5..604a126b78c8 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -85,7 +85,6 @@ struct i2c_dev_boardinfo { */ struct i2c_dev_desc { struct i3c_i2c_dev_desc common; - const struct i2c_dev_boardinfo *boardinfo; struct i2c_client *dev; u16 addr; u8 lvr; -- cgit v1.2.3-71-gd317 From 98b4d7a4e7374a44c4afd9f08330e72f6ad0d644 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Fri, 4 Mar 2022 14:00:40 +0800 Subject: net: dev: use kfree_skb_reason() for sch_handle_egress() Replace kfree_skb() used in sch_handle_egress() with kfree_skb_reason(). The drop reason SKB_DROP_REASON_TC_EGRESS is introduced. Considering the code path of tc egress, we make it distinct from the drop reason SKB_DROP_REASON_QDISC_DROP in the next commit. Signed-off-by: Menglong Dong Signed-off-by: David S.
Miller --- include/linux/skbuff.h | 1 + include/trace/events/skb.h | 1 + net/core/dev.c | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5445860e1ba6..1ffe64616741 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -394,6 +394,7 @@ enum skb_drop_reason { * entry is full */ SKB_DROP_REASON_NEIGH_DEAD, /* neigh entry is dead */ + SKB_DROP_REASON_TC_EGRESS, /* dropped in TC egress HOOK */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 1977f301260d..53755e8191a1 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -45,6 +45,7 @@ EM(SKB_DROP_REASON_NEIGH_FAILED, NEIGH_FAILED) \ EM(SKB_DROP_REASON_NEIGH_QUEUEFULL, NEIGH_QUEUEFULL) \ EM(SKB_DROP_REASON_NEIGH_DEAD, NEIGH_DEAD) \ + EM(SKB_DROP_REASON_TC_EGRESS, TC_EGRESS) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/core/dev.c b/net/core/dev.c index 96bcc003e018..ef0b1992cf13 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3889,7 +3889,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) case TC_ACT_SHOT: mini_qdisc_qstats_cpu_drop(miniq); *ret = NET_XMIT_DROP; - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS); return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: -- cgit v1.2.3-71-gd317 From 215b0f1963d4e34fccac6992b3debe26f78a6eb8 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Fri, 4 Mar 2022 14:00:41 +0800 Subject: net: skb: introduce the function kfree_skb_list_reason() To report the reasons for skb drops, introduce the function kfree_skb_list_reason() and make kfree_skb_list() an inline call to it. This function will be used in the next commit in __dev_xmit_skb(). Signed-off-by: Menglong Dong Signed-off-by: David S. Miller --- include/linux/skbuff.h | 8 +++++++- net/core/skbuff.c | 7 ++++--- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1ffe64616741..1f2de153755c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1202,10 +1202,16 @@ static inline void kfree_skb(struct sk_buff *skb) } void skb_release_head_state(struct sk_buff *skb); -void kfree_skb_list(struct sk_buff *segs); +void kfree_skb_list_reason(struct sk_buff *segs, + enum skb_drop_reason reason); void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); +static inline void kfree_skb_list(struct sk_buff *segs) +{ + kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED); +} + #ifdef CONFIG_TRACEPOINTS void consume_skb(struct sk_buff *skb); #else diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 23f3ba343661..10bde7c6db44 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -777,16 +777,17 @@ void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) } EXPORT_SYMBOL(kfree_skb_reason); -void kfree_skb_list(struct sk_buff *segs) +void kfree_skb_list_reason(struct sk_buff *segs, + enum skb_drop_reason reason) { while (segs) { struct sk_buff *next = segs->next; - kfree_skb(segs); + kfree_skb_reason(segs, reason); segs = next; } } -EXPORT_SYMBOL(kfree_skb_list); +EXPORT_SYMBOL(kfree_skb_list_reason); /* Dump skb information and contents.
* -- cgit v1.2.3-71-gd317 From 7faef0547f4c29031a68d058918b031a8e520d49 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Fri, 4 Mar 2022 14:00:42 +0800 Subject: net: dev: add skb drop reasons to __dev_xmit_skb() Add reasons for skb drops to __dev_xmit_skb() by replacing kfree_skb_list() with kfree_skb_list_reason(). The drop reason SKB_DROP_REASON_QDISC_DROP is introduced for qdisc enqueue failures. Signed-off-by: Menglong Dong Signed-off-by: David S. Miller --- include/linux/skbuff.h | 4 ++++ include/trace/events/skb.h | 1 + net/core/dev.c | 5 +++-- 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1f2de153755c..769d54ba2f3b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -395,6 +395,10 @@ enum skb_drop_reason { */ SKB_DROP_REASON_NEIGH_DEAD, /* neigh entry is dead */ SKB_DROP_REASON_TC_EGRESS, /* dropped in TC egress HOOK */ + SKB_DROP_REASON_QDISC_DROP, /* dropped by qdisc when packet + * outputting (failed to enqueue to + * current qdisc) + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 53755e8191a1..dbf3e2e3c1b4 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -46,6 +46,7 @@ EM(SKB_DROP_REASON_NEIGH_QUEUEFULL, NEIGH_QUEUEFULL) \ EM(SKB_DROP_REASON_NEIGH_DEAD, NEIGH_DEAD) \ EM(SKB_DROP_REASON_TC_EGRESS, TC_EGRESS) \ + EM(SKB_DROP_REASON_QDISC_DROP, QDISC_DROP) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/core/dev.c b/net/core/dev.c index ef0b1992cf13..e2c107643ee5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3759,7 +3759,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, no_lock_out: if (unlikely(to_free)) - kfree_skb_list(to_free); + kfree_skb_list_reason(to_free, + SKB_DROP_REASON_QDISC_DROP); return rc; } @@ -3814,7 +3815,7 @@ no_lock_out: } spin_unlock(root_lock); if (unlikely(to_free)) - kfree_skb_list(to_free); + kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP); if (unlikely(contended)) spin_unlock(&q->busylock); return rc; -- cgit v1.2.3-71-gd317 From 44f0bd40803c0e04f1c8cd59df3c7acce783ae9c Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Fri, 4 Mar 2022 14:00:43 +0800 Subject: net: dev: use kfree_skb_reason() for enqueue_to_backlog() Replace kfree_skb() used in enqueue_to_backlog() with kfree_skb_reason(). The skb drop reason SKB_DROP_REASON_CPU_BACKLOG is introduced for the case of failing to enqueue the skb to the per CPU backlog queue. The further reason can be backlog queue full or RPS flow limitation, and I think we needn't make further distinctions. Signed-off-by: Menglong Dong Signed-off-by: David S. Miller --- include/linux/skbuff.h | 6 ++++++ include/trace/events/skb.h | 1 + net/core/dev.c | 5 ++++- 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 769d54ba2f3b..44ecbfff2b69 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -399,6 +399,12 @@ enum skb_drop_reason { * outputting (failed to enqueue to * current qdisc) */ + SKB_DROP_REASON_CPU_BACKLOG, /* failed to enqueue the skb to + * the per CPU backlog queue.
This + * can be caused by backlog queue + * full (see netdev_max_backlog in + * net.rst) or RPS flow limit + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index dbf3e2e3c1b4..3bb90ca893ae 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -47,6 +47,7 @@ EM(SKB_DROP_REASON_NEIGH_DEAD, NEIGH_DEAD) \ EM(SKB_DROP_REASON_TC_EGRESS, TC_EGRESS) \ EM(SKB_DROP_REASON_QDISC_DROP, QDISC_DROP) \ + EM(SKB_DROP_REASON_CPU_BACKLOG, CPU_BACKLOG) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/core/dev.c b/net/core/dev.c index e2c107643ee5..460ac941a631 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4570,10 +4570,12 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) static int enqueue_to_backlog(struct sk_buff *skb, int cpu, unsigned int *qtail) { + enum skb_drop_reason reason; struct softnet_data *sd; unsigned long flags; unsigned int qlen; + reason = SKB_DROP_REASON_NOT_SPECIFIED; sd = &per_cpu(softnet_data, cpu); rps_lock_irqsave(sd, &flags); @@ -4596,13 +4598,14 @@ enqueue: napi_schedule_rps(sd); goto enqueue; } + reason = SKB_DROP_REASON_CPU_BACKLOG; drop: sd->dropped++; rps_unlock_irq_restore(sd, &flags); atomic_long_inc(&skb->dev->rx_dropped); - kfree_skb(skb); + kfree_skb_reason(skb, reason); return NET_RX_DROP; } -- cgit v1.2.3-71-gd317 From 7e726ed81e1ddd5fdc431e02b94fcfe2a9876d42 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Fri, 4 Mar 2022 14:00:44 +0800 Subject: net: dev: use kfree_skb_reason() for do_xdp_generic() Replace kfree_skb() used in do_xdp_generic() with kfree_skb_reason(). The drop reason SKB_DROP_REASON_XDP is introduced for this case. Signed-off-by: Menglong Dong Signed-off-by: David S. Miller --- include/linux/skbuff.h | 1 + include/trace/events/skb.h | 1 + net/core/dev.c | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 44ecbfff2b69..7a38d0f90cef 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -405,6 +405,7 @@ enum skb_drop_reason { * full (see netdev_max_backlog in * net.rst) or RPS flow limit */ + SKB_DROP_REASON_XDP, /* dropped by XDP in input path */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 3bb90ca893ae..8c4c343c830f 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -48,6 +48,7 @@ EM(SKB_DROP_REASON_TC_EGRESS, TC_EGRESS) \ EM(SKB_DROP_REASON_QDISC_DROP, QDISC_DROP) \ EM(SKB_DROP_REASON_CPU_BACKLOG, CPU_BACKLOG) \ + EM(SKB_DROP_REASON_XDP, XDP) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/core/dev.c b/net/core/dev.c index 460ac941a631..f449834a63df 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4825,7 +4825,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) } return XDP_PASS; out_redir: - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_XDP); return XDP_DROP; } EXPORT_SYMBOL_GPL(do_xdp_generic); -- cgit v1.2.3-71-gd317 From a568aff26ac03ee9eb1482683514914a5ec3b4c3 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Fri, 4 Mar 2022 14:00:45 +0800 Subject: net: dev: use kfree_skb_reason() for sch_handle_ingress() Replace kfree_skb() used in sch_handle_ingress() with kfree_skb_reason(). Following drop reasons are introduced: SKB_DROP_REASON_TC_INGRESS Signed-off-by: Menglong Dong Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 1 + include/trace/events/skb.h | 1 + net/core/dev.c | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7a38d0f90cef..6b14b2d297d2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -406,6 +406,7 @@ enum skb_drop_reason { * net.rst) or RPS flow limit */ SKB_DROP_REASON_XDP, /* dropped by XDP in input path */ + SKB_DROP_REASON_TC_INGRESS, /* dropped in TC ingress HOOK */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 8c4c343c830f..514dd2de8776 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -49,6 +49,7 @@ EM(SKB_DROP_REASON_QDISC_DROP, QDISC_DROP) \ EM(SKB_DROP_REASON_CPU_BACKLOG, CPU_BACKLOG) \ EM(SKB_DROP_REASON_XDP, XDP) \ + EM(SKB_DROP_REASON_TC_INGRESS, TC_INGRESS) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/core/dev.c b/net/core/dev.c index f449834a63df..ba7580ccee94 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5041,7 +5041,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, break; case TC_ACT_SHOT: mini_qdisc_qstats_cpu_drop(miniq); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS); return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: -- cgit v1.2.3-71-gd317 From 6c2728b7c14164928cb7cb9c847dead101b2d503 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Fri, 4 Mar 2022 14:00:46 +0800 Subject: net: dev: use kfree_skb_reason() for __netif_receive_skb_core() Add reason for skb drops to __netif_receive_skb_core() when no packet_type is found to handle the skb. For this purpose, the drop reason SKB_DROP_REASON_PTYPE_ABSENT is introduced. Take ether packets for example: this case mainly happens when the L3 protocol is not supported. Signed-off-by: Menglong Dong Signed-off-by: David S. Miller --- include/linux/skbuff.h | 5 +++++ include/trace/events/skb.h | 1 + net/core/dev.c | 8 +++++--- 3 files changed, 11 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6b14b2d297d2..2be263184d1e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -407,6 +407,11 @@ enum skb_drop_reason { */ SKB_DROP_REASON_XDP, /* dropped by XDP in input path */ SKB_DROP_REASON_TC_INGRESS, /* dropped in TC ingress HOOK */ + SKB_DROP_REASON_PTYPE_ABSENT, /* no packet_type found to handle + * the skb. For an ether packet, + * this means that L3 protocol is + * not supported + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 514dd2de8776..c0769d943f8e 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -50,6 +50,7 @@ EM(SKB_DROP_REASON_CPU_BACKLOG, CPU_BACKLOG) \ EM(SKB_DROP_REASON_XDP, XDP) \ EM(SKB_DROP_REASON_TC_INGRESS, TC_INGRESS) \ + EM(SKB_DROP_REASON_PTYPE_ABSENT, PTYPE_ABSENT) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/net/core/dev.c b/net/core/dev.c index ba7580ccee94..ba69ddf85af6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5358,11 +5358,13 @@ check_vlan_id: *ppt_prev = pt_prev; } else { drop: - if (!deliver_exact) + if (!deliver_exact) { atomic_long_inc(&skb->dev->rx_dropped); + kfree_skb_reason(skb, SKB_DROP_REASON_PTYPE_ABSENT); + } else { atomic_long_inc(&skb->dev->rx_nohandler); - kfree_skb(skb); + kfree_skb(skb); + } /* Jamal, now you will not able to escape explaining * me how you were going to use this.
:-) */ -- cgit v1.2.3-71-gd317 From 838d6d3461db0fdbf33fc5f8a69c27b50b4a46da Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 14 Jan 2022 14:56:15 -0500 Subject: virtio: unexport virtio_finalize_features virtio_finalize_features is only used internally within virtio. No reason to export it. Signed-off-by: Michael S. Tsirkin Reviewed-by: Cornelia Huck Acked-by: Jason Wang --- drivers/virtio/virtio.c | 3 +-- include/linux/virtio.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 00ac9db792a4..d891b0a354b0 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -166,7 +166,7 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status) } EXPORT_SYMBOL_GPL(virtio_add_status); -int virtio_finalize_features(struct virtio_device *dev) +static int virtio_finalize_features(struct virtio_device *dev) { int ret = dev->config->finalize_features(dev); unsigned status; @@ -202,7 +202,6 @@ int virtio_finalize_features(struct virtio_device *dev) } return 0; } -EXPORT_SYMBOL_GPL(virtio_finalize_features); void virtio_reset_device(struct virtio_device *dev) { diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 72292a62cd90..5464f398912a 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -133,7 +133,6 @@ bool is_virtio_device(struct device *dev); void virtio_break_device(struct virtio_device *dev); void virtio_config_changed(struct virtio_device *dev); -int virtio_finalize_features(struct virtio_device *dev); #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); -- cgit v1.2.3-71-gd317 From 4fa59ede95195f267101a1b8916992cf3f245cdb Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 14 Jan 2022 14:58:41 -0500 Subject: virtio: acknowledge all features before access The feature negotiation was designed in a way that makes it possible for devices to know which config fields will be accessed by drivers. This is broken since commit 404123c2db79 ("virtio: allow drivers to validate features") with fallout in at least block and net. We have a partial work-around in commit 2f9a174f918e ("virtio: write back F_VERSION_1 before validate") which at least lets devices find out which format config space should have, but this is a partial fix: guests should not access config space without acknowledging features since otherwise we'll never be able to change the config space format. To fix, split finalize_features from virtio_finalize_features and call finalize_features with all feature bits before validation, and then - if validation changed any bits - once again after. Since virtio_finalize_features no longer writes out features rename it to virtio_features_ok - since that is what it does: checks that features are ok with the device. As a side effect, this also reduces the amount of hypervisor accesses - we now only acknowledge features once unless we are clearing any features when validating (which is uncommon). IIRC, I think that this was more or less always the intent in the spec but unfortunately the way the spec is worded does not say this explicitly; I plan to address this at the spec level, too. Acked-by: Jason Wang Cc: stable@vger.kernel.org Fixes: 404123c2db79 ("virtio: allow drivers to validate features") Fixes: 2f9a174f918e ("virtio: write back F_VERSION_1 before validate") Cc: "Halil Pasic" Signed-off-by: Michael S.
Tsirkin --- drivers/virtio/virtio.c | 39 ++++++++++++++++++++++----------------- include/linux/virtio_config.h | 3 ++- 2 files changed, 24 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index d891b0a354b0..d6396be0ea83 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -166,14 +166,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status) } EXPORT_SYMBOL_GPL(virtio_add_status); -static int virtio_finalize_features(struct virtio_device *dev) +/* Do some validation, then set FEATURES_OK */ +static int virtio_features_ok(struct virtio_device *dev) { - int ret = dev->config->finalize_features(dev); unsigned status; + int ret; might_sleep(); - if (ret) - return ret; ret = arch_has_restricted_virtio_memory_access(); if (ret) { @@ -244,17 +243,6 @@ static int virtio_dev_probe(struct device *_d) driver_features_legacy = driver_features; } - /* - * Some devices detect legacy solely via F_VERSION_1. Write - * F_VERSION_1 to force LE config space accesses before FEATURES_OK for - * these when needed. - */ - if (drv->validate && !virtio_legacy_is_little_endian() - && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) { - dev->features = BIT_ULL(VIRTIO_F_VERSION_1); - dev->config->finalize_features(dev); - } - if (device_features & (1ULL << VIRTIO_F_VERSION_1)) dev->features = driver_features & device_features; else @@ -265,13 +253,26 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1ULL << i)) __virtio_set_bit(dev, i); + err = dev->config->finalize_features(dev); + if (err) + goto err; + if (drv->validate) { + u64 features = dev->features; + err = drv->validate(dev); if (err) goto err; + + /* Did validation change any features? Then write them again. */ + if (features != dev->features) { + err = dev->config->finalize_features(dev); + if (err) + goto err; + } } - err = virtio_finalize_features(dev); + err = virtio_features_ok(dev); if (err) goto err; @@ -495,7 +496,11 @@ int virtio_device_restore(struct virtio_device *dev) /* We have a driver! */ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); - ret = virtio_finalize_features(dev); + ret = dev->config->finalize_features(dev); + if (ret) + goto err; + + ret = virtio_features_ok(dev); if (ret) goto err; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 4d107ad31149..dafdc7f48c01 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -64,8 +64,9 @@ struct virtio_shm_region { * Returns the first 64 feature bits (all we currently need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device - * This gives the final feature bits for the device: it can change + * This sends the driver feature bits to the device: it can change * the dev->feature bits if it wants. + * Note: despite the name this can be called any number of times. * Returns 0 on success or error status * @bus_name: return the bus name associated with the device (optional) * vdev: the virtio_device -- cgit v1.2.3-71-gd317 From bf9ad37dc8a30cce22ae95d6c2ca6abf8731d305 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 14 Jul 2015 14:26:34 +0200 Subject: signal, x86: Delay calling signals in atomic on RT enabled kernels On x86_64 we must disable preemption before we enable interrupts for stack faults, int3 and debugging, because the current task is using a per CPU debug stack defined by the IST. 
If we schedule out, another task can come in and use the same stack and cause the stack to be corrupted and crash the kernel on return. When CONFIG_PREEMPT_RT is enabled, spinlock_t locks become sleeping locks, and one of these is the spin lock used in signal handling. Some of the debug code (int3) causes do_trap() to send a signal. This function takes a spinlock_t lock that has been converted to a sleeping lock. If this happens, the above issues with the corrupted stack are possible. Instead of calling the signal right away, for PREEMPT_RT and x86, the signal information is stored in the task's task_struct and TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume code will send the signal when preemption is enabled. [ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT to ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. ] [bigeasy: Add on 32bit as per Yang Shi, minor rewording. ] [ tglx: Use a config option ] Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/Ygq5aBB/qMQw6aP5@linutronix.de --- arch/x86/Kconfig | 1 + include/linux/sched.h | 3 +++ kernel/Kconfig.preempt | 12 +++++++++++- kernel/entry/common.c | 14 ++++++++++++++ kernel/signal.c | 40 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 69 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9f5bd41bf660..d557ac29b6cd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -120,6 +120,7 @@ config X86 select ARCH_WANTS_NO_INSTR select ARCH_WANT_HUGE_PMD_SHARE select ARCH_WANT_LD_ORPHAN_WARN + select ARCH_WANTS_RT_DELAYED_SIGNALS select ARCH_WANTS_THP_SWAP if X86_64 select ARCH_HAS_PARANOID_L1D_FLUSH select BUILDTIME_TABLE_SORT diff --git a/include/linux/sched.h b/include/linux/sched.h index 75ba8aa60248..098e37fd770a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1087,6 +1087,9 @@ struct task_struct { /* Restored if set_restore_sigmask() was used: */ sigset_t saved_sigmask; struct sigpending pending; +#ifdef CONFIG_RT_DELAYED_SIGNALS + struct kernel_siginfo forced_info; +#endif unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index ce77f0265660..5644abd5f8a8 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -132,4 +132,14 @@ config SCHED_CORE which is the likely usage by Linux distributions, there should be no measurable impact on performance. - +config ARCH_WANTS_RT_DELAYED_SIGNALS + bool + help + This option is selected by architectures where raising signals + can happen in atomic contexts on PREEMPT_RT enabled kernels. This + option delays raising the signal until the return to user space + loop where it is also delivered. X86 requires this to deliver + signals from trap handlers which run on IST stacks.
+ +config RT_DELAYED_SIGNALS + def_bool PREEMPT_RT && ARCH_WANTS_RT_DELAYED_SIGNALS diff --git a/kernel/entry/common.c b/kernel/entry/common.c index bad713684c2e..0543a2c92f20 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -148,6 +148,18 @@ static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work) arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING); } +#ifdef CONFIG_RT_DELAYED_SIGNALS +static inline void raise_delayed_signal(void) +{ + if (unlikely(current->forced_info.si_signo)) { + force_sig_info(&current->forced_info); + current->forced_info.si_signo = 0; + } +} +#else +static inline void raise_delayed_signal(void) { } +#endif + static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work) { @@ -162,6 +174,8 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, if (ti_work & _TIF_NEED_RESCHED) schedule(); + raise_delayed_signal(); + if (ti_work & _TIF_UPROBE) uprobe_notify_resume(regs); diff --git a/kernel/signal.c b/kernel/signal.c index 9b04631acde8..e93de6daa188 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1307,6 +1307,43 @@ enum sig_handler { HANDLER_EXIT, /* Only visible as the process exit code */ }; +/* + * On some architectures, PREEMPT_RT has to delay sending a signal from a + * trap since it cannot enable preemption, and the signal code's + * spin_locks turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME + * which will send the signal on exit of the trap. + */ +#ifdef CONFIG_RT_DELAYED_SIGNALS +static inline bool force_sig_delayed(struct kernel_siginfo *info, + struct task_struct *t) +{ + if (!in_atomic()) + return false; + + if (WARN_ON_ONCE(t->forced_info.si_signo)) + return true; + + if (is_si_special(info)) { + WARN_ON_ONCE(info != SEND_SIG_PRIV); + t->forced_info.si_signo = info->si_signo; + t->forced_info.si_errno = 0; + t->forced_info.si_code = SI_KERNEL; + t->forced_info.si_pid = 0; + t->forced_info.si_uid = 0; + } else { + t->forced_info = *info; + } + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); + return true; +} +#else +static inline bool force_sig_delayed(struct kernel_siginfo *info, + struct task_struct *t) +{ + return false; +} +#endif + /* * Force a signal that the process can't ignore: if necessary * we unblock the signal and change any SIG_IGN to SIG_DFL. @@ -1327,6 +1364,9 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, struct k_sigaction *action; int sig = info->si_signo; + if (force_sig_delayed(info, t)) + return 0; + spin_lock_irqsave(&t->sighand->siglock, flags); action = &t->sighand->action[sig-1]; ignored = action->sa.sa_handler == SIG_IGN; -- cgit v1.2.3-71-gd317 From 402e6688a7df482cc57be0b0dd5b443d995073fb Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:48 +0800 Subject: iommu/vt-d: Remove intel_iommu::domains The "domains" field of the intel_iommu structure keeps the mapping of domain_id to dmar_domain. This information is not used anywhere. Remove it and clean up the related code to avoid unnecessary memory consumption.
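For reference, the removed lookup split the 16-bit domain id across a two-level table; a condensed sketch of the deleted scheme (adapted from the code removed below):

/*
 * Condensed from the removed helpers: a 256-entry directory of
 * 256-entry leaf tables, indexed by the 16-bit domain id.  Fully
 * populated this is roughly 512 KiB of pointers per IOMMU on a
 * 64-bit kernel, and nothing ever read the mapping back.
 */
static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains = iommu->domains[did >> 8];

	return domains ? domains[did & 0xff] : NULL;
}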
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-2-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 68 ++------------------------------------------- include/linux/intel-iommu.h | 1 - 2 files changed, 3 insertions(+), 66 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b549172e88ef..e3b04d5d87b0 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -455,36 +455,6 @@ __setup("intel_iommu=", intel_iommu_setup); static struct kmem_cache *iommu_domain_cache; static struct kmem_cache *iommu_devinfo_cache; -static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) -{ - struct dmar_domain **domains; - int idx = did >> 8; - - domains = iommu->domains[idx]; - if (!domains) - return NULL; - - return domains[did & 0xff]; -} - -static void set_iommu_domain(struct intel_iommu *iommu, u16 did, - struct dmar_domain *domain) -{ - struct dmar_domain **domains; - int idx = did >> 8; - - if (!iommu->domains[idx]) { - size_t size = 256 * sizeof(struct dmar_domain *); - iommu->domains[idx] = kzalloc(size, GFP_ATOMIC); - } - - domains = iommu->domains[idx]; - if (WARN_ON(!domains)) - return; - else - domains[did & 0xff] = domain; -} - void *alloc_pgtable_page(int node) { struct page *page; @@ -1751,8 +1721,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain) DMA_TLB_DSI_FLUSH); if (!cap_caching_mode(iommu->cap)) - iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), - 0, MAX_AGAW_PFN_WIDTH); + iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH); } } @@ -1815,7 +1784,6 @@ static void iommu_disable_translation(struct intel_iommu *iommu) static int iommu_init_domains(struct intel_iommu *iommu) { u32 ndomains; - size_t size; ndomains = cap_ndoms(iommu->cap); pr_debug("%s: Number of Domains supported <%d>\n", @@ -1827,24 +1795,6 @@ static int iommu_init_domains(struct intel_iommu *iommu) if (!iommu->domain_ids) return -ENOMEM; - size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **); - iommu->domains = kzalloc(size, GFP_KERNEL); - - if (iommu->domains) { - size = 256 * sizeof(struct dmar_domain *); - iommu->domains[0] = kzalloc(size, GFP_KERNEL); - } - - if (!iommu->domains || !iommu->domains[0]) { - pr_err("%s: Allocating domain array failed\n", - iommu->name); - bitmap_free(iommu->domain_ids); - kfree(iommu->domains); - iommu->domain_ids = NULL; - iommu->domains = NULL; - return -ENOMEM; - } - /* * If Caching mode is set, then invalid translations are tagged * with domain-id 0, hence we need to pre-allocate it. 
We also @@ -1871,7 +1821,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) struct device_domain_info *info, *tmp; unsigned long flags; - if (!iommu->domains || !iommu->domain_ids) + if (!iommu->domain_ids) return; spin_lock_irqsave(&device_domain_lock, flags); @@ -1892,15 +1842,8 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) static void free_dmar_iommu(struct intel_iommu *iommu) { - if ((iommu->domains) && (iommu->domain_ids)) { - int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8; - int i; - - for (i = 0; i < elems; i++) - kfree(iommu->domains[i]); - kfree(iommu->domains); + if (iommu->domain_ids) { bitmap_free(iommu->domain_ids); - iommu->domains = NULL; iommu->domain_ids = NULL; } @@ -1978,11 +1921,8 @@ static int domain_attach_iommu(struct dmar_domain *domain, } set_bit(num, iommu->domain_ids); - set_iommu_domain(iommu, num, domain); - domain->iommu_did[iommu->seq_id] = num; domain->nid = iommu->node; - domain_update_iommu_cap(domain); } @@ -2001,8 +1941,6 @@ static void domain_detach_iommu(struct dmar_domain *domain, if (domain->iommu_refcnt[iommu->seq_id] == 0) { num = domain->iommu_did[iommu->seq_id]; clear_bit(num, iommu->domain_ids); - set_iommu_domain(iommu, num, NULL); - domain_update_iommu_cap(domain); domain->iommu_did[iommu->seq_id] = 0; } diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 5cfda90b2cca..8c7591b5f3e2 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -578,7 +578,6 @@ struct intel_iommu { #ifdef CONFIG_INTEL_IOMMU unsigned long *domain_ids; /* bitmap of domains */ - struct dmar_domain ***domains; /* ptr to domains */ spinlock_t lock; /* protect context, domain ids */ struct root_entry *root_entry; /* virtual address */ -- cgit v1.2.3-71-gd317 From 586081d3f6b13ec9dfdfdf3d7842a688b376fa5e Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:52 +0800 Subject: iommu/vt-d: Remove DEFER_DEVICE_DOMAIN_INFO Allocate and set the per-device iommu private data during iommu device probe. This makes the per-device iommu private data always available during iommu_probe_device() and iommu_release_device(). With this changed, the dummy DEFER_DEVICE_DOMAIN_INFO pointer could be removed. The wrappers for getting the private data and domain are also cleaned. 
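The practical effect for callers can be sketched as follows (hypothetical helper, assuming the device has been through probe; not part of the patch):

/*
 * Hypothetical caller: after this change dev_iommu_priv_get() returns
 * valid private data for the whole window between probe_device() and
 * release_device(), so no sentinel value or NULL dance is needed.
 */
static struct dmar_domain *dev_to_dmar_domain(struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);

	return info->domain;	/* may still be NULL until first attach */
}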
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-6-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/debugfs.c | 3 +- drivers/iommu/intel/iommu.c | 186 +++++++++++++++--------------------------- drivers/iommu/intel/pasid.c | 12 +-- drivers/iommu/intel/svm.c | 6 +- include/linux/intel-iommu.h | 2 - 5 files changed, 79 insertions(+), 130 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c index db7a0ca73626..ed796eea4581 100644 --- a/drivers/iommu/intel/debugfs.c +++ b/drivers/iommu/intel/debugfs.c @@ -344,7 +344,8 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde, static int show_device_domain_translation(struct device *dev, void *data) { - struct dmar_domain *domain = find_domain(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct dmar_domain *domain = info->domain; struct seq_file *m = data; u64 path[6] = { 0 }; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 51db26d8606c..d9965e72d9a8 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -342,21 +342,6 @@ static int iommu_skip_te_disable; int intel_iommu_gfx_mapped; EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); -#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2)) -struct device_domain_info *get_domain_info(struct device *dev) -{ - struct device_domain_info *info; - - if (!dev) - return NULL; - - info = dev_iommu_priv_get(dev); - if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO)) - return NULL; - - return info; -} - DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); @@ -741,11 +726,6 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, return &context[devfn]; } -static bool attach_deferred(struct device *dev) -{ - return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO; -} - /** * is_downstream_to_pci_bridge - test if a device belongs to the PCI * sub-hierarchy of a candidate PCI-PCI bridge @@ -2432,15 +2412,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH); } -static inline void unlink_domain_info(struct device_domain_info *info) -{ - assert_spin_locked(&device_domain_lock); - list_del(&info->link); - list_del(&info->global); - if (info->dev) - dev_iommu_priv_set(info->dev, NULL); -} - static void domain_remove_dev_info(struct dmar_domain *domain) { struct device_domain_info *info, *tmp; @@ -2452,24 +2423,6 @@ static void domain_remove_dev_info(struct dmar_domain *domain) spin_unlock_irqrestore(&device_domain_lock, flags); } -struct dmar_domain *find_domain(struct device *dev) -{ - struct device_domain_info *info; - - if (unlikely(!dev || !dev->iommu)) - return NULL; - - if (unlikely(attach_deferred(dev))) - return NULL; - - /* No lock here, assumes no domain exit in normal case */ - info = get_domain_info(dev); - if (likely(info)) - return info->domain; - - return NULL; -} - static inline struct device_domain_info * dmar_search_domain_by_dev_info(int segment, int bus, int devfn) { @@ -2530,66 +2483,20 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, struct device *dev, struct dmar_domain *domain) { - struct device_domain_info *info; + struct device_domain_info *info = dev_iommu_priv_get(dev); 
unsigned long flags; int ret; - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return NULL; - - if (!dev_is_real_dma_subdevice(dev)) { - info->bus = bus; - info->devfn = devfn; - info->segment = iommu->segment; - } else { - struct pci_dev *pdev = to_pci_dev(dev); - - info->bus = pdev->bus->number; - info->devfn = pdev->devfn; - info->segment = pci_domain_nr(pdev->bus); - } - - info->dev = dev; - info->domain = domain; - info->iommu = iommu; - - if (dev && dev_is_pci(dev)) { - struct pci_dev *pdev = to_pci_dev(info->dev); - - if (ecap_dev_iotlb_support(iommu->ecap) && - pci_ats_supported(pdev) && - dmar_find_matched_atsr_unit(pdev)) - info->ats_supported = 1; - - if (sm_supported(iommu)) { - if (pasid_supported(iommu)) { - int features = pci_pasid_features(pdev); - if (features >= 0) - info->pasid_supported = features | 1; - } - - if (info->ats_supported && ecap_prs(iommu->ecap) && - pci_pri_supported(pdev)) - info->pri_supported = 1; - } - } - spin_lock_irqsave(&device_domain_lock, flags); + info->domain = domain; spin_lock(&iommu->lock); ret = domain_attach_iommu(domain, iommu); spin_unlock(&iommu->lock); - if (ret) { spin_unlock_irqrestore(&device_domain_lock, flags); - kfree(info); return NULL; } - list_add(&info->link, &domain->devices); - list_add(&info->global, &device_domain_list); - if (dev) - dev_iommu_priv_set(dev, info); spin_unlock_irqrestore(&device_domain_lock, flags); /* PASID table is mandatory for a PCI device in scalable mode. */ @@ -4336,13 +4243,11 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) intel_pasid_free_table(info->dev); } - unlink_domain_info(info); + list_del(&info->link); spin_lock_irqsave(&iommu->lock, flags); domain_detach_iommu(domain, iommu); spin_unlock_irqrestore(&iommu->lock, flags); - - kfree(info); } static void dmar_remove_one_dev_info(struct device *dev) @@ -4351,7 +4256,7 @@ static void dmar_remove_one_dev_info(struct device *dev) unsigned long flags; spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (info) __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); @@ -4475,10 +4380,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, /* normally dev is not mapped */ if (unlikely(domain_context_mapped(dev))) { - struct dmar_domain *old_domain; + struct device_domain_info *info = dev_iommu_priv_get(dev); - old_domain = find_domain(dev); - if (old_domain) + if (info->domain) dmar_remove_one_dev_info(dev); } @@ -4642,28 +4546,73 @@ static bool intel_iommu_capable(enum iommu_cap cap) static struct iommu_device *intel_iommu_probe_device(struct device *dev) { + struct pci_dev *pdev = dev_is_pci(dev) ? 
to_pci_dev(dev) : NULL; + struct device_domain_info *info; struct intel_iommu *iommu; + unsigned long flags; + u8 bus, devfn; - iommu = device_to_iommu(dev, NULL, NULL); + iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return ERR_PTR(-ENODEV); - if (translation_pre_enabled(iommu)) - dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO); + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + if (dev_is_real_dma_subdevice(dev)) { + info->bus = pdev->bus->number; + info->devfn = pdev->devfn; + info->segment = pci_domain_nr(pdev->bus); + } else { + info->bus = bus; + info->devfn = devfn; + info->segment = iommu->segment; + } + + info->dev = dev; + info->iommu = iommu; + if (dev_is_pci(dev)) { + if (ecap_dev_iotlb_support(iommu->ecap) && + pci_ats_supported(pdev) && + dmar_find_matched_atsr_unit(pdev)) + info->ats_supported = 1; + + if (sm_supported(iommu)) { + if (pasid_supported(iommu)) { + int features = pci_pasid_features(pdev); + + if (features >= 0) + info->pasid_supported = features | 1; + } + + if (info->ats_supported && ecap_prs(iommu->ecap) && + pci_pri_supported(pdev)) + info->pri_supported = 1; + } + } + + spin_lock_irqsave(&device_domain_lock, flags); + list_add(&info->global, &device_domain_list); + dev_iommu_priv_set(dev, info); + spin_unlock_irqrestore(&device_domain_lock, flags); return &iommu->iommu; } static void intel_iommu_release_device(struct device *dev) { - struct intel_iommu *iommu; - - iommu = device_to_iommu(dev, NULL, NULL); - if (!iommu) - return; + struct device_domain_info *info = dev_iommu_priv_get(dev); + unsigned long flags; dmar_remove_one_dev_info(dev); + spin_lock_irqsave(&device_domain_lock, flags); + dev_iommu_priv_set(dev, NULL); + list_del(&info->global); + spin_unlock_irqrestore(&device_domain_lock, flags); + + kfree(info); set_dma_ops(dev, NULL); } @@ -4732,14 +4681,14 @@ static void intel_iommu_get_resv_regions(struct device *device, int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) { - struct device_domain_info *info; + struct device_domain_info *info = dev_iommu_priv_get(dev); struct context_entry *context; struct dmar_domain *domain; unsigned long flags; u64 ctx_lo; int ret; - domain = find_domain(dev); + domain = info->domain; if (!domain) return -EINVAL; @@ -4747,8 +4696,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) spin_lock(&iommu->lock); ret = -EINVAL; - info = get_domain_info(dev); - if (!info || !info->pasid_supported) + if (!info->pasid_supported) goto out; context = iommu_context_addr(iommu, info->bus, info->devfn, 0); @@ -4790,7 +4738,7 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev) static int intel_iommu_enable_sva(struct device *dev) { - struct device_domain_info *info = get_domain_info(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_iommu *iommu; int ret; @@ -4819,7 +4767,7 @@ static int intel_iommu_enable_sva(struct device *dev) static int intel_iommu_disable_sva(struct device *dev) { - struct device_domain_info *info = get_domain_info(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_iommu *iommu = info->iommu; int ret; @@ -4832,7 +4780,7 @@ static int intel_iommu_disable_sva(struct device *dev) static int intel_iommu_enable_iopf(struct device *dev) { - struct device_domain_info *info = get_domain_info(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); if (info && info->pri_supported) return 0; @@ -4872,7 +4820,9 @@ 
intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) static bool intel_iommu_is_attach_deferred(struct device *dev) { - return attach_deferred(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); + + return translation_pre_enabled(info->iommu) && !info->domain; } /* diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 10fb82ea467d..f8d215d85695 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -150,7 +150,7 @@ int intel_pasid_alloc_table(struct device *dev) int size; might_sleep(); - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table)) return -EINVAL; @@ -197,7 +197,7 @@ void intel_pasid_free_table(struct device *dev) struct pasid_entry *table; int i, max_pde; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (!info || !dev_is_pci(dev) || !info->pasid_table) return; @@ -223,7 +223,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev) { struct device_domain_info *info; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (!info) return NULL; @@ -234,7 +234,7 @@ static int intel_pasid_get_dev_max_id(struct device *dev) { struct device_domain_info *info; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (!info || !info->pasid_table) return 0; @@ -254,7 +254,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid) return NULL; dir = pasid_table->table; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); dir_index = pasid >> PASID_PDE_SHIFT; index = pasid & PASID_PTE_MASK; @@ -487,7 +487,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu, struct device_domain_info *info; u16 sid, qdep, pfsid; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (!info || !info->ats_enabled) return; diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index d04c83dd3a58..944e2408b6d2 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -200,7 +200,7 @@ static void __flush_svm_range_dev(struct intel_svm *svm, unsigned long address, unsigned long pages, int ih) { - struct device_domain_info *info = get_domain_info(sdev->dev); + struct device_domain_info *info = dev_iommu_priv_get(sdev->dev); if (WARN_ON(!pages)) return; @@ -337,7 +337,7 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, struct mm_struct *mm, unsigned int flags) { - struct device_domain_info *info = get_domain_info(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); unsigned long iflags, sflags; struct intel_svm_dev *sdev; struct intel_svm *svm; @@ -545,7 +545,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid) u16 sid, did; int qdep; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (WARN_ON(!info || !dev_is_pci(dev))) return; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 8c7591b5f3e2..03f1134fc2fe 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -733,8 +733,6 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info, void *data), void *data); void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); -struct dmar_domain *find_domain(struct device *dev); -struct device_domain_info *get_domain_info(struct device *dev); struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); #ifdef 
CONFIG_INTEL_IOMMU_SVM -- cgit v1.2.3-71-gd317 From 2852631d96a643e0b21a9b14ad38caf465d7225f Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 1 Mar 2022 10:01:56 +0800 Subject: iommu/vt-d: Move intel_iommu_ops to header file The compiler is not happy about the hidden declaration of intel_iommu_ops. .../iommu.c:414:24: warning: symbol 'intel_iommu_ops' was not declared. Should it be static? Move the declaration to the header file to make the compiler happy. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220207141240.8253-1-andriy.shevchenko@linux.intel.com Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20220301020159.633356-10-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/dmar.c | 2 -- include/linux/intel-iommu.h | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 6d10be50ec30..4de960834a1b 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -66,8 +66,6 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)]; static int alloc_iommu(struct dmar_drhd_unit *drhd); static void free_iommu(struct intel_iommu *iommu); -extern const struct iommu_ops intel_iommu_ops; - static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) { /* diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 03f1134fc2fe..4909d6c9ac21 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -783,6 +783,8 @@ bool context_present(struct context_entry *context); struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, u8 devfn, int alloc); +extern const struct iommu_ops intel_iommu_ops; + #ifdef CONFIG_INTEL_IOMMU extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); -- cgit v1.2.3-71-gd317 From 97f2f2c5317f55ae3440733a090a96a251da222b Mon Sep 17 00:00:00 2001 From: Yian Chen Date: Tue, 1 Mar 2022 10:01:59 +0800 Subject: iommu/vt-d: Enable ATS for the devices in SATC table Starting from Intel VT-d v3.2, Intel platform BIOS can provide an additional SATC table structure. The SATC table includes a list of SoC integrated devices that support ATC (Address translation cache). Enabling ATC (via ATS capability) can be a functional requirement for SATC device operation or optional to enhance device performance/functionality. This is determined by the ATC_REQUIRED bit in the SATC table. When the IOMMU is working in scalable mode, software chooses to always enable ATS for every device in the SATC table because Intel SoC devices in the SATC table are trusted to use ATS. On the other hand, if the IOMMU is in legacy mode, ATS of SATC capable devices can work transparently to software and be automatically enabled by IOMMU hardware. As a result, there is no need for software to enable ATS on these devices. This also removes the dmar_find_matched_atsr_unit() helper as it becomes dead code now.
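The resulting policy can be condensed into a single predicate (illustrative only; the real logic lives in dmar_ats_supported() in the patch below):

/*
 * Illustrative condensation of the SATC decision: OS-managed ATS is
 * enabled for a SATC-listed device unless the device requires ATC and
 * the IOMMU runs in legacy mode, where hardware enables ATS
 * transparently and a second enable would only duplicate TLB
 * invalidations.
 */
static bool os_should_enable_ats(bool listed_in_satc, bool atc_required,
				 bool scalable_mode)
{
	if (!listed_in_satc)
		return false;	/* fall back to the ATSR bridge walk */
	return !(atc_required && !scalable_mode);
}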
Signed-off-by: Yian Chen Link: https://lore.kernel.org/r/20220222185416.1722611-1-yian.chen@intel.com Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20220301020159.633356-13-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 40 ++++++++++++++++++++++++++++++++++++++-- include/linux/intel-iommu.h | 1 - 2 files changed, 38 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 2aa7afd90c3e..8662f5c10721 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3693,7 +3693,31 @@ static void intel_iommu_free_dmars(void) } } -int dmar_find_matched_atsr_unit(struct pci_dev *dev) +static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev) +{ + struct dmar_satc_unit *satcu; + struct acpi_dmar_satc *satc; + struct device *tmp; + int i; + + dev = pci_physfn(dev); + rcu_read_lock(); + + list_for_each_entry_rcu(satcu, &dmar_satc_units, list) { + satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); + if (satc->segment != pci_domain_nr(dev->bus)) + continue; + for_each_dev_scope(satcu->devices, satcu->devices_cnt, i, tmp) + if (to_pci_dev(tmp) == dev) + goto out; + } + satcu = NULL; +out: + rcu_read_unlock(); + return satcu; +} + +static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) { int i, ret = 1; struct pci_bus *bus; @@ -3701,8 +3725,20 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev) struct device *tmp; struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; + struct dmar_satc_unit *satcu; dev = pci_physfn(dev); + satcu = dmar_find_matched_satc_unit(dev); + if (satcu) + /* + * This device supports ATS as it is in SATC table. + * When IOMMU is in legacy mode, enabling ATS is done + * automatically by HW for the device that requires + * ATS, hence OS should not enable this device ATS + * to avoid duplicated TLB invalidation. + */ + return !(satcu->atc_required && !sm_supported(iommu)); + for (bus = dev->bus; bus; bus = bus->parent) { bridge = bus->self; /* If it's an integrated device, allow ATS */ @@ -4550,7 +4586,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) if (dev_is_pci(dev)) { if (ecap_dev_iotlb_support(iommu->ecap) && pci_ats_supported(pdev) && - dmar_find_matched_atsr_unit(pdev)) + dmar_ats_supported(pdev, iommu)) info->ats_supported = 1; if (sm_supported(iommu)) { diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4909d6c9ac21..2f9891cb3d00 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -693,7 +693,6 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte) } extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); -extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); extern int dmar_enable_qi(struct intel_iommu *iommu); extern void dmar_disable_qi(struct intel_iommu *iommu); -- cgit v1.2.3-71-gd317 From 26c9da51949916b73d995a2c89412c346903273d Mon Sep 17 00:00:00 2001 From: Puranjay Mohan Date: Wed, 16 Feb 2022 13:42:23 +0530 Subject: remoteproc: Introduce sysfs_read_only flag The remoteproc framework provides sysfs interfaces for changing the firmware name and for starting/stopping a remote processor through the sysfs files 'state' and 'firmware'. The 'coredump' file is used to set the coredump configuration. The 'recovery' sysfs file can also be used similarly to control the error recovery state machine of a remoteproc. 
These interfaces are currently allowed irrespective of how the remoteprocs were booted (such as remoteproc self auto-boot, remoteproc client-driven boot, etc.). These interfaces can adversely affect a remoteproc and its clients, especially when a remoteproc is being controlled by remoteproc client driver(s). Also, not all remoteproc drivers may want to support the sysfs interfaces by default. Add support to make the remoteproc sysfs files read only by introducing a state flag 'sysfs_read_only' that the individual remoteproc drivers can set based on their usage needs. The default behavior is to allow the sysfs operations as before. Implement attribute_group->is_visible() to make the sysfs entries read only when the 'sysfs_read_only' flag is set. Signed-off-by: Puranjay Mohan Link: https://lore.kernel.org/r/20220216081224.9956-2-p-mohan@ti.com Signed-off-by: Mathieu Poirier --- drivers/remoteproc/remoteproc_sysfs.c | 19 ++++++++++++++++++- include/linux/remoteproc.h | 2 ++ 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index ea8b89f97d7b..abf0cd05d5e1 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -230,6 +230,22 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(name); +static umode_t rproc_is_visible(struct kobject *kobj, struct attribute *attr, + int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct rproc *rproc = to_rproc(dev); + umode_t mode = attr->mode; + + if (rproc->sysfs_read_only && (attr == &dev_attr_recovery.attr || + attr == &dev_attr_firmware.attr || + attr == &dev_attr_state.attr || + attr == &dev_attr_coredump.attr)) + mode = 0444; + + return mode; +} + static struct attribute *rproc_attrs[] = { &dev_attr_coredump.attr, &dev_attr_recovery.attr, @@ -240,7 +256,8 @@ static struct attribute *rproc_attrs[] = { }; static const struct attribute_group rproc_devgroup = { - .attrs = rproc_attrs + .attrs = rproc_attrs, + .is_visible = rproc_is_visible, }; static const struct attribute_group *rproc_devgroups[] = { diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index e0600e1e5c17..93a1d0050fbc 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -523,6 +523,7 @@ struct rproc_dump_segment { * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU * @auto_boot: flag to indicate if remote processor should be auto-started + * @sysfs_read_only: flag to make remoteproc sysfs files read only * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc * @elf_class: firmware ELF class @@ -562,6 +563,7 @@ struct rproc { size_t table_sz; bool has_iommu; bool auto_boot; + bool sysfs_read_only; struct list_head dump_segments; int nb_vdev; u8 elf_class; -- cgit v1.2.3-71-gd317 From e0077cc13b831f8fad5557442f73bf7728683713 Mon Sep 17 00:00:00 2001 From: Si-Wei Liu Date: Fri, 14 Jan 2022 19:27:59 -0500 Subject: vdpa: factor out vdpa_set_features_unlocked for vdpa internal use No functional change introduced. A vdpa bus driver such as virtio_vdpa or vhost_vdpa is not supposed to take care of the core's locking on its own. The locked API vdpa_set_features should suffice for the bus driver's needs. Signed-off-by: Si-Wei Liu Reviewed-by: Eli Cohen Link: https://lore.kernel.org/r/1642206481-30721-2-git-send-email-si-wei.liu@oracle.com Signed-off-by: Michael S.
Tsirkin Acked-by: Jason Wang --- drivers/vdpa/vdpa.c | 2 +- drivers/vhost/vdpa.c | 2 +- drivers/virtio/virtio_vdpa.c | 2 +- include/linux/vdpa.h | 18 ++++++++++++------ 4 files changed, 15 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 9846c9de4bfa..1ea525433a5c 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -393,7 +393,7 @@ static void vdpa_get_config_unlocked(struct vdpa_device *vdev, * If it does happen we assume a legacy guest. */ if (!vdev->features_valid) - vdpa_set_features(vdev, 0, true); + vdpa_set_features_unlocked(vdev, 0); ops->get_config(vdev, offset, buf, len); } diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 851539807bc9..ec5249e8c32d 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep) if (copy_from_user(&features, featurep, sizeof(features))) return -EFAULT; - if (vdpa_set_features(vdpa, features, false)) + if (vdpa_set_features(vdpa, features)) return -EINVAL; return 0; diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 7767a7f0119b..76504559bc25 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -317,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev) /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - return vdpa_set_features(vdpa, vdev->features, false); + return vdpa_set_features(vdpa, vdev->features); } static const char *virtio_vdpa_bus_name(struct virtio_device *vdev) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 2de442ececae..721089bb4c84 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -401,18 +401,24 @@ static inline int vdpa_reset(struct vdpa_device *vdev) return ret; } -static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features, bool locked) +static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features) { const struct vdpa_config_ops *ops = vdev->config; int ret; - if (!locked) - mutex_lock(&vdev->cf_mutex); - vdev->features_valid = true; ret = ops->set_driver_features(vdev, features); - if (!locked) - mutex_unlock(&vdev->cf_mutex); + + return ret; +} + +static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) +{ + int ret; + + mutex_lock(&vdev->cf_mutex); + ret = vdpa_set_features_unlocked(vdev, features); + mutex_unlock(&vdev->cf_mutex); return ret; } -- cgit v1.2.3-71-gd317 From b4060db9251f919506e4d672737c6b8ab9a84701 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Wed, 23 Feb 2022 08:34:48 -0800 Subject: PM: runtime: Have devm_pm_runtime_enable() handle pm_runtime_dont_use_autosuspend() The PM Runtime docs say: Drivers in ->remove() callback should undo the runtime PM changes done in ->probe(). Usually this means calling pm_runtime_disable(), pm_runtime_dont_use_autosuspend() etc. From grepping code, it's clear that many people aren't aware of the need to call pm_runtime_dont_use_autosuspend(). When brainstorming solutions, one idea that came up was to leverage the new-ish devm_pm_runtime_enable() function. The idea here is that: * When the devm action is called we know that the driver is being removed. It's the perfect time to undo the use_autosuspend. * The code of pm_runtime_dont_use_autosuspend() already handles the case of being called when autosuspend wasn't enabled. 
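A minimal probe() sketch of the pattern this enables (illustrative; the foo_* names are hypothetical and the usual <linux/pm_runtime.h> and platform_device includes are assumed): a driver that opts into autosuspend can rely on the devres action to undo both calls when the driver is detached:

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);

	/*
	 * The devres action now calls pm_runtime_dont_use_autosuspend()
	 * and pm_runtime_disable() at driver exit, so no explicit
	 * cleanup is needed in the remove() path.
	 */
	return devm_pm_runtime_enable(dev);
}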
Suggested-by: Laurent Pinchart Signed-off-by: Douglas Anderson Reviewed-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- drivers/base/power/runtime.c | 5 +++++ include/linux/pm_runtime.h | 4 ++++ 2 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 2f3cce17219b..d4059e6ffeae 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1476,11 +1476,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_enable); static void pm_runtime_disable_action(void *data) { + pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } /** * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable. + * + * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for + * you at driver exit time if needed. + * * @dev: Device to handle. */ int devm_pm_runtime_enable(struct device *dev) diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 9f09601c465a..2bff6a10095d 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -567,6 +567,10 @@ static inline void pm_runtime_disable(struct device *dev) * Allow the runtime PM autosuspend mechanism to be used for @dev whenever * requested (or "autosuspend" will be handled as direct runtime-suspend for * it). + * + * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend() + * at driver exit time unless your driver initially enabled pm_runtime + * with devm_pm_runtime_enable() (which handles it for you). */ static inline void pm_runtime_use_autosuspend(struct device *dev) { -- cgit v1.2.3-71-gd317 From 92c45b63ce22c8898aa41806e8d6692bcd577510 Mon Sep 17 00:00:00 2001 From: Mark Tomlinson Date: Thu, 6 Aug 2020 16:14:55 +1200 Subject: PCI: Reduce warnings on possible RW1C corruption For hardware that only supports 32-bit writes to PCI, there is the possibility of clearing RW1C (write-one-to-clear) bits. A rate-limited message was introduced by fb2659230120, but rate-limiting is not the best choice here. Some devices may not show the warnings they should if another device has just produced a bunch of warnings. Also, the number of messages can be a nuisance on devices which are otherwise working fine. Change the ratelimit to a single warning per bus. This ensures no bus is 'starved' of emitting a warning and also that there isn't a continuous stream of warnings. It would be preferable to have a warning per device, but the pci_dev structure is not available here, and a lookup from devfn would be far too slow. Suggested-by: Bjorn Helgaas Fixes: fb2659230120 ("PCI: Warn on possible RW1C corruption for sub-32 bit config writes") Link: https://lore.kernel.org/r/20200806041455.11070-1-mark.tomlinson@alliedtelesis.co.nz Signed-off-by: Mark Tomlinson Signed-off-by: Bjorn Helgaas Reviewed-by: Florian Fainelli Reviewed-by: Rob Herring Acked-by: Scott Branden --- drivers/pci/access.c | 9 ++++++--- include/linux/pci.h | 1 + 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 0d9f6b21babb..708c7529647f 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -159,9 +159,12 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, * write happen to have any RW1C (write-one-to-clear) bits set, we * just inadvertently cleared something we shouldn't have.
*/ - dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n", - size, pci_domain_nr(bus), bus->number, - PCI_SLOT(devfn), PCI_FUNC(devfn), where); + if (!bus->unsafe_warn) { + dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n", + size, pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), where); + bus->unsafe_warn = 1; + } mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); tmp = readl(addr) & mask; diff --git a/include/linux/pci.h b/include/linux/pci.h index 8253a5413d7c..678fecdf6b81 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -668,6 +668,7 @@ struct pci_bus { struct bin_attribute *legacy_io; /* Legacy I/O for this bus */ struct bin_attribute *legacy_mem; /* Legacy mem */ unsigned int is_added:1; + unsigned int unsafe_warn:1; /* warned about RW1C config write */ }; #define to_pci_bus(n) container_of(n, struct pci_bus, dev) -- cgit v1.2.3-71-gd317 From 5c26f6ac9416b63d093e29c30e79b3297e425472 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 4 Mar 2022 20:28:51 -0800 Subject: mm: refactor vm_area_struct::anon_vma_name usage code Avoid mixing strings and their anon_vma_name referenced pointers by using struct anon_vma_name whenever possible. This simplifies the code and allows easier sharing of anon_vma_name structures when they represent the same name. [surenb@google.com: fix comment] Link: https://lkml.kernel.org/r/20220223153613.835563-1-surenb@google.com Link: https://lkml.kernel.org/r/20220224231834.1481408-1-surenb@google.com Signed-off-by: Suren Baghdasaryan Suggested-by: Matthew Wilcox Suggested-by: Michal Hocko Acked-by: Michal Hocko Cc: Colin Cross Cc: Sumit Semwal Cc: Dave Hansen Cc: Kees Cook Cc: "Kirill A. Shutemov" Cc: Vlastimil Babka Cc: Johannes Weiner Cc: "Eric W. 
Biederman" Cc: Christian Brauner Cc: Alexey Gladkov Cc: Sasha Levin Cc: Chris Hyser Cc: Davidlohr Bueso Cc: Peter Collingbourne Cc: Xiaofeng Cao Cc: David Hildenbrand Cc: Cyrill Gorcunov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 6 ++-- fs/userfaultfd.c | 6 ++-- include/linux/mm.h | 7 ++-- include/linux/mm_inline.h | 87 ++++++++++++++++++++++++++++++++--------------- include/linux/mm_types.h | 5 ++- kernel/fork.c | 4 +-- kernel/sys.c | 19 +++++++---- mm/madvise.c | 87 ++++++++++++++++------------------------------- mm/mempolicy.c | 2 +- mm/mlock.c | 2 +- mm/mmap.c | 12 +++---- mm/mprotect.c | 2 +- 12 files changed, 125 insertions(+), 114 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 6e97ed775074..2c48b1eaaa9c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -309,7 +309,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma) name = arch_vma_name(vma); if (!name) { - const char *anon_name; + struct anon_vma_name *anon_name; if (!mm) { name = "[vdso]"; @@ -327,10 +327,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma) goto done; } - anon_name = vma_anon_name(vma); + anon_name = anon_vma_name(vma); if (anon_name) { seq_pad(m, ' '); - seq_printf(m, "[anon:%s]", anon_name); + seq_printf(m, "[anon:%s]", anon_name->name); } } diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index e26b10132d47..8e03b3d3f5fa 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -878,7 +878,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), - NULL_VM_UFFD_CTX, vma_anon_name(vma)); + NULL_VM_UFFD_CTX, anon_vma_name(vma)); if (prev) vma = prev; else @@ -1438,7 +1438,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), ((struct vm_userfaultfd_ctx){ ctx }), - vma_anon_name(vma)); + anon_vma_name(vma)); if (prev) { vma = prev; goto next; @@ -1615,7 +1615,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, prev = vma_merge(mm, prev, start, vma_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), - NULL_VM_UFFD_CTX, vma_anon_name(vma)); + NULL_VM_UFFD_CTX, anon_vma_name(vma)); if (prev) { vma = prev; goto next; diff --git a/include/linux/mm.h b/include/linux/mm.h index 213cc569b192..5744a3fc4716 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2626,7 +2626,7 @@ static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, extern struct vm_area_struct *vma_merge(struct mm_struct *, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, - struct mempolicy *, struct vm_userfaultfd_ctx, const char *); + struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *); extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); extern int __split_vma(struct mm_struct *, struct vm_area_struct *, unsigned long addr, int new_below); @@ -3372,11 +3372,12 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma) #ifdef CONFIG_ANON_VMA_NAME int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, - unsigned long len_in, const char *name); + unsigned long len_in, + struct anon_vma_name *anon_name); #else static inline int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, - unsigned long len_in, const char *name) { + 
unsigned long len_in, struct anon_vma_name *anon_name) { return 0; } #endif diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index b725839dfe71..dd3accaa4e6d 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -140,50 +140,81 @@ static __always_inline void del_page_from_lru_list(struct page *page, #ifdef CONFIG_ANON_VMA_NAME /* - * mmap_lock should be read-locked when calling vma_anon_name() and while using - * the returned pointer. + * mmap_lock should be read-locked when calling anon_vma_name(). Caller should + * either keep holding the lock while using the returned pointer or it should + * raise anon_vma_name refcount before releasing the lock. */ -extern const char *vma_anon_name(struct vm_area_struct *vma); +extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma); +extern struct anon_vma_name *anon_vma_name_alloc(const char *name); +extern void anon_vma_name_free(struct kref *kref); -/* - * mmap_lock should be read-locked for orig_vma->vm_mm. - * mmap_lock should be write-locked for new_vma->vm_mm or new_vma should be - * isolated. - */ -extern void dup_vma_anon_name(struct vm_area_struct *orig_vma, - struct vm_area_struct *new_vma); +/* mmap_lock should be read-locked */ +static inline void anon_vma_name_get(struct anon_vma_name *anon_name) +{ + if (anon_name) + kref_get(&anon_name->kref); +} -/* - * mmap_lock should be write-locked or vma should have been isolated under - * write-locked mmap_lock protection. - */ -extern void free_vma_anon_name(struct vm_area_struct *vma); +static inline void anon_vma_name_put(struct anon_vma_name *anon_name) +{ + if (anon_name) + kref_put(&anon_name->kref, anon_vma_name_free); +} -/* mmap_lock should be read-locked */ -static inline bool is_same_vma_anon_name(struct vm_area_struct *vma, - const char *name) +static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma, + struct vm_area_struct *new_vma) +{ + struct anon_vma_name *anon_name = anon_vma_name(orig_vma); + + if (anon_name) { + anon_vma_name_get(anon_name); + new_vma->anon_name = anon_name; + } +} + +static inline void free_anon_vma_name(struct vm_area_struct *vma) { - const char *vma_name = vma_anon_name(vma); + /* + * Not using anon_vma_name because it generates a warning if mmap_lock + * is not held, which might be the case here. 
+ */ + if (!vma->vm_file) + anon_vma_name_put(vma->anon_name); +} - /* either both NULL, or pointers to same string */ - if (vma_name == name) +static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1, + struct anon_vma_name *anon_name2) +{ + if (anon_name1 == anon_name2) return true; - return name && vma_name && !strcmp(name, vma_name); + return anon_name1 && anon_name2 && + !strcmp(anon_name1->name, anon_name2->name); } + #else /* CONFIG_ANON_VMA_NAME */ -static inline const char *vma_anon_name(struct vm_area_struct *vma) +static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) { return NULL; } -static inline void dup_vma_anon_name(struct vm_area_struct *orig_vma, - struct vm_area_struct *new_vma) {} -static inline void free_vma_anon_name(struct vm_area_struct *vma) {} -static inline bool is_same_vma_anon_name(struct vm_area_struct *vma, - const char *name) + +static inline struct anon_vma_name *anon_vma_name_alloc(const char *name) +{ + return NULL; +} + +static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {} +static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {} +static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma, + struct vm_area_struct *new_vma) {} +static inline void free_anon_vma_name(struct vm_area_struct *vma) {} + +static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1, + struct anon_vma_name *anon_name2) { return true; } + #endif /* CONFIG_ANON_VMA_NAME */ static inline void init_tlb_flush_pending(struct mm_struct *mm) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5140e5feb486..0f549870da6a 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -416,7 +416,10 @@ struct vm_area_struct { struct rb_node rb; unsigned long rb_subtree_last; } shared; - /* Serialized by mmap_sem. */ + /* + * Serialized by mmap_sem. Never use directly because it is + * valid only when vm_file is NULL. Use anon_vma_name instead. 
+ */ struct anon_vma_name *anon_name; }; diff --git a/kernel/fork.c b/kernel/fork.c index a024bf6254df..f1e89007f228 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -366,14 +366,14 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) *new = data_race(*orig); INIT_LIST_HEAD(&new->anon_vma_chain); new->vm_next = new->vm_prev = NULL; - dup_vma_anon_name(orig, new); + dup_anon_vma_name(orig, new); } return new; } void vm_area_free(struct vm_area_struct *vma) { - free_vma_anon_name(vma); + free_anon_vma_name(vma); kmem_cache_free(vm_area_cachep, vma); } diff --git a/kernel/sys.c b/kernel/sys.c index 97dc9e5d6bf9..5b0e172c4d47 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -2286,15 +2287,16 @@ static int prctl_set_vma(unsigned long opt, unsigned long addr, { struct mm_struct *mm = current->mm; const char __user *uname; - char *name, *pch; + struct anon_vma_name *anon_name = NULL; int error; switch (opt) { case PR_SET_VMA_ANON_NAME: uname = (const char __user *)arg; if (uname) { - name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN); + char *name, *pch; + name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN); if (IS_ERR(name)) return PTR_ERR(name); @@ -2304,15 +2306,18 @@ static int prctl_set_vma(unsigned long opt, unsigned long addr, return -EINVAL; } } - } else { - /* Reset the name */ - name = NULL; + /* anon_vma has its own copy */ + anon_name = anon_vma_name_alloc(name); + kfree(name); + if (!anon_name) + return -ENOMEM; + } mmap_write_lock(mm); - error = madvise_set_anon_name(mm, addr, size, name); + error = madvise_set_anon_name(mm, addr, size, anon_name); mmap_write_unlock(mm); - kfree(name); + anon_vma_name_put(anon_name); break; default: error = -EINVAL; diff --git a/mm/madvise.c b/mm/madvise.c index 5604064df464..081b1cded21e 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -65,7 +65,7 @@ static int madvise_need_mmap_write(int behavior) } #ifdef CONFIG_ANON_VMA_NAME -static struct anon_vma_name *anon_vma_name_alloc(const char *name) +struct anon_vma_name *anon_vma_name_alloc(const char *name) { struct anon_vma_name *anon_name; size_t count; @@ -81,78 +81,49 @@ static struct anon_vma_name *anon_vma_name_alloc(const char *name) return anon_name; } -static void vma_anon_name_free(struct kref *kref) +void anon_vma_name_free(struct kref *kref) { struct anon_vma_name *anon_name = container_of(kref, struct anon_vma_name, kref); kfree(anon_name); } -static inline bool has_vma_anon_name(struct vm_area_struct *vma) +struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) { - return !vma->vm_file && vma->anon_name; -} - -const char *vma_anon_name(struct vm_area_struct *vma) -{ - if (!has_vma_anon_name(vma)) - return NULL; - mmap_assert_locked(vma->vm_mm); - return vma->anon_name->name; -} - -void dup_vma_anon_name(struct vm_area_struct *orig_vma, - struct vm_area_struct *new_vma) -{ - if (!has_vma_anon_name(orig_vma)) - return; - - kref_get(&orig_vma->anon_name->kref); - new_vma->anon_name = orig_vma->anon_name; -} - -void free_vma_anon_name(struct vm_area_struct *vma) -{ - struct anon_vma_name *anon_name; - - if (!has_vma_anon_name(vma)) - return; + if (vma->vm_file) + return NULL; - anon_name = vma->anon_name; - vma->anon_name = NULL; - kref_put(&anon_name->kref, vma_anon_name_free); + return vma->anon_name; } /* mmap_lock should be write-locked */ -static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name) +static int replace_anon_vma_name(struct vm_area_struct *vma, + struct 
anon_vma_name *anon_name) { - const char *anon_name; + struct anon_vma_name *orig_name = anon_vma_name(vma); - if (!name) { - free_vma_anon_name(vma); + if (!anon_name) { + vma->anon_name = NULL; + anon_vma_name_put(orig_name); return 0; } - anon_name = vma_anon_name(vma); - if (anon_name) { - /* Same name, nothing to do here */ - if (!strcmp(name, anon_name)) - return 0; + if (anon_vma_name_eq(orig_name, anon_name)) + return 0; - free_vma_anon_name(vma); - } - vma->anon_name = anon_vma_name_alloc(name); - if (!vma->anon_name) - return -ENOMEM; + anon_vma_name_get(anon_name); + vma->anon_name = anon_name; + anon_vma_name_put(orig_name); return 0; } #else /* CONFIG_ANON_VMA_NAME */ -static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name) +static int replace_anon_vma_name(struct vm_area_struct *vma, + struct anon_vma_name *anon_name) { - if (name) + if (anon_name) return -EINVAL; return 0; @@ -165,13 +136,13 @@ static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name) static int madvise_update_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long new_flags, - const char *name) + struct anon_vma_name *anon_name) { struct mm_struct *mm = vma->vm_mm; int error; pgoff_t pgoff; - if (new_flags == vma->vm_flags && is_same_vma_anon_name(vma, name)) { + if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) { *prev = vma; return 0; } @@ -179,7 +150,7 @@ static int madvise_update_vma(struct vm_area_struct *vma, pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), - vma->vm_userfaultfd_ctx, name); + vma->vm_userfaultfd_ctx, anon_name); if (*prev) { vma = *prev; goto success; @@ -209,7 +180,7 @@ success: */ vma->vm_flags = new_flags; if (!vma->vm_file) { - error = replace_vma_anon_name(vma, name); + error = replace_anon_vma_name(vma, anon_name); if (error) return error; } @@ -1041,7 +1012,7 @@ static int madvise_vma_behavior(struct vm_area_struct *vma, } error = madvise_update_vma(vma, prev, start, end, new_flags, - vma_anon_name(vma)); + anon_vma_name(vma)); out: /* @@ -1225,7 +1196,7 @@ int madvise_walk_vmas(struct mm_struct *mm, unsigned long start, static int madvise_vma_anon_name(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, - unsigned long name) + unsigned long anon_name) { int error; @@ -1234,7 +1205,7 @@ static int madvise_vma_anon_name(struct vm_area_struct *vma, return -EBADF; error = madvise_update_vma(vma, prev, start, end, vma->vm_flags, - (const char *)name); + (struct anon_vma_name *)anon_name); /* * madvise() returns EAGAIN if kernel resources, such as @@ -1246,7 +1217,7 @@ static int madvise_vma_anon_name(struct vm_area_struct *vma, } int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, - unsigned long len_in, const char *name) + unsigned long len_in, struct anon_vma_name *anon_name) { unsigned long end; unsigned long len; @@ -1266,7 +1237,7 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, if (end == start) return 0; - return madvise_walk_vmas(mm, start, end, (unsigned long)name, + return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name, madvise_vma_anon_name); } #endif /* CONFIG_ANON_VMA_NAME */ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 028e8dd82b44..69284d3b5e53 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -814,7 +814,7 @@ 
static int mbind_range(struct mm_struct *mm, unsigned long start, prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, new_pol, vma->vm_userfaultfd_ctx, - vma_anon_name(vma)); + anon_vma_name(vma)); if (prev) { vma = prev; next = vma->vm_next; diff --git a/mm/mlock.c b/mm/mlock.c index 8f584eddd305..25934e7db3e1 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -512,7 +512,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), - vma->vm_userfaultfd_ctx, vma_anon_name(vma)); + vma->vm_userfaultfd_ctx, anon_vma_name(vma)); if (*prev) { vma = *prev; goto success; diff --git a/mm/mmap.c b/mm/mmap.c index d445c1b9d606..f61a15474dd6 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1031,7 +1031,7 @@ again: static inline int is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, - const char *anon_name) + struct anon_vma_name *anon_name) { /* * VM_SOFTDIRTY should not prevent from VMA merging, if we @@ -1049,7 +1049,7 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma, return 0; if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) return 0; - if (!is_same_vma_anon_name(vma, anon_name)) + if (!anon_vma_name_eq(anon_vma_name(vma), anon_name)) return 0; return 1; } @@ -1084,7 +1084,7 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, - const char *anon_name) + struct anon_vma_name *anon_name) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { @@ -1106,7 +1106,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, - const char *anon_name) + struct anon_vma_name *anon_name) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { @@ -1167,7 +1167,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, - const char *anon_name) + struct anon_vma_name *anon_name) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; @@ -3256,7 +3256,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, return NULL; /* should never get here */ new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), - vma->vm_userfaultfd_ctx, vma_anon_name(vma)); + vma->vm_userfaultfd_ctx, anon_vma_name(vma)); if (new_vma) { /* * Source vma may have been merged into new_vma diff --git a/mm/mprotect.c b/mm/mprotect.c index 5ca3fbcb1495..2887644fd150 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -464,7 +464,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *pprev = vma_merge(mm, *pprev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), - vma->vm_userfaultfd_ctx, vma_anon_name(vma)); + vma->vm_userfaultfd_ctx, anon_vma_name(vma)); if (*pprev) { 
vma = *pprev; VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY); -- cgit v1.2.3-71-gd317 From 96403e11283def1d1c465c8279514c9a504d8630 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 4 Mar 2022 20:28:55 -0800 Subject: mm: prevent vm_area_struct::anon_name refcount saturation The anon_vma_name refcount in a deep process chain with many vmas could grow really high. With the default sysctl_max_map_count (64k) and default pid_max (32k) the max number of vmas in the system is 2147450880 and the refcounter has headroom of 1073774592 before it reaches REFCOUNT_SATURATED (3221225472). Therefore it's unlikely that an anonymous name refcounter will overflow with these defaults. Currently the max for pid_max is PID_MAX_LIMIT (4194304) and for sysctl_max_map_count it's INT_MAX (2147483647). In this configuration anon_vma_name refcount overflow becomes theoretically possible (though it would still require heavy sharing of that anon_vma_name between processes). The kref refcounting interface used in the anon_vma_name structure will detect a counter overflow when it reaches the REFCOUNT_SATURATED value, but will only generate a warning and freeze the ref counter. This would lead to the refcounted object never being freed. A determined attacker could leak memory like that, but it would be a rather expensive and inefficient way to do so. To ensure the anon_vma_name refcount does not overflow, stop anon_vma_name sharing when the refcount reaches REFCOUNT_MAX (2147483647), which still leaves INT_MAX/2 (1073741823) values before the counter reaches REFCOUNT_SATURATED. This should provide enough headroom for raising the refcounts temporarily. Link: https://lkml.kernel.org/r/20220223153613.835563-2-surenb@google.com Signed-off-by: Suren Baghdasaryan Suggested-by: Michal Hocko Acked-by: Michal Hocko Cc: Alexey Gladkov Cc: Chris Hyser Cc: Christian Brauner Cc: Colin Cross Cc: Cyrill Gorcunov Cc: Dave Hansen Cc: David Hildenbrand Cc: Davidlohr Bueso Cc: "Eric W. Biederman" Cc: Johannes Weiner Cc: Kees Cook Cc: "Kirill A.
Shutemov" Cc: Matthew Wilcox Cc: Peter Collingbourne Cc: Sasha Levin Cc: Sumit Semwal Cc: Vlastimil Babka Cc: Xiaofeng Cao Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 18 ++++++++++++++---- mm/madvise.c | 3 +-- 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index dd3accaa4e6d..cf90b1fa2c60 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -161,15 +161,25 @@ static inline void anon_vma_name_put(struct anon_vma_name *anon_name) kref_put(&anon_name->kref, anon_vma_name_free); } +static inline +struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name) +{ + /* Prevent anon_name refcount saturation early on */ + if (kref_read(&anon_name->kref) < REFCOUNT_MAX) { + anon_vma_name_get(anon_name); + return anon_name; + + } + return anon_vma_name_alloc(anon_name->name); +} + static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma, struct vm_area_struct *new_vma) { struct anon_vma_name *anon_name = anon_vma_name(orig_vma); - if (anon_name) { - anon_vma_name_get(anon_name); - new_vma->anon_name = anon_name; - } + if (anon_name) + new_vma->anon_name = anon_vma_name_reuse(anon_name); } static inline void free_anon_vma_name(struct vm_area_struct *vma) diff --git a/mm/madvise.c b/mm/madvise.c index 081b1cded21e..1f2693dccf7b 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -113,8 +113,7 @@ static int replace_anon_vma_name(struct vm_area_struct *vma, if (anon_vma_name_eq(orig_name, anon_name)) return 0; - anon_vma_name_get(anon_name); - vma->anon_name = anon_name; + vma->anon_name = anon_vma_name_reuse(anon_name); anon_vma_name_put(orig_name); return 0; -- cgit v1.2.3-71-gd317 From 25b35dd28138f61f9a0fb8b76c0483761fd228bd Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 5 Mar 2022 04:16:38 +0530 Subject: bpf: Add check_func_arg_reg_off function Lift the list of register types allowed for having fixed and variable offsets when passed as helper function arguments into a common helper, so that they can be reused for kfunc checks in later commits. Keeping a common helper aids maintainability and allows us to follow the same consistent rules across helpers and kfuncs. Also, convert check_func_arg to use this function. 
Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-2-memxor@gmail.com --- include/linux/bpf_verifier.h | 3 ++ kernel/bpf/verifier.c | 69 ++++++++++++++++++++++++++------------------ 2 files changed, 44 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7a7be8c057f2..38b24ee8d8c2 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -521,6 +521,9 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); int check_ptr_off_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno); +int check_func_arg_reg_off(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno, + enum bpf_arg_type arg_type); int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno); int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a57db4b2803c..e37eb6020253 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5359,6 +5359,44 @@ found: return 0; } +int check_func_arg_reg_off(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno, + enum bpf_arg_type arg_type) +{ + enum bpf_reg_type type = reg->type; + bool fixed_off_ok = false; + + switch ((u32)type) { + case SCALAR_VALUE: + /* Pointer types where reg offset is explicitly allowed: */ + case PTR_TO_PACKET: + case PTR_TO_PACKET_META: + case PTR_TO_MAP_KEY: + case PTR_TO_MAP_VALUE: + case PTR_TO_MEM: + case PTR_TO_MEM | MEM_RDONLY: + case PTR_TO_MEM | MEM_ALLOC: + case PTR_TO_BUF: + case PTR_TO_BUF | MEM_RDONLY: + case PTR_TO_STACK: + /* Some of the argument types nevertheless require a + * zero register offset. + */ + if (arg_type != ARG_PTR_TO_ALLOC_MEM) + return 0; + break; + /* All the rest must be rejected, except PTR_TO_BTF_ID which allows + * fixed offset. + */ + case PTR_TO_BTF_ID: + fixed_off_ok = true; + break; + default: + break; + } + return __check_ptr_off_reg(env, reg, regno, fixed_off_ok); +} + static int check_func_arg(struct bpf_verifier_env *env, u32 arg, struct bpf_call_arg_meta *meta, const struct bpf_func_proto *fn) @@ -5408,34 +5446,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, if (err) return err; - switch ((u32)type) { - case SCALAR_VALUE: - /* Pointer types where reg offset is explicitly allowed: */ - case PTR_TO_PACKET: - case PTR_TO_PACKET_META: - case PTR_TO_MAP_KEY: - case PTR_TO_MAP_VALUE: - case PTR_TO_MEM: - case PTR_TO_MEM | MEM_RDONLY: - case PTR_TO_MEM | MEM_ALLOC: - case PTR_TO_BUF: - case PTR_TO_BUF | MEM_RDONLY: - case PTR_TO_STACK: - /* Some of the argument types nevertheless require a - * zero register offset. 
- */ - if (arg_type == ARG_PTR_TO_ALLOC_MEM) - goto force_off_check; - break; - /* All the rest must be rejected: */ - default: -force_off_check: - err = __check_ptr_off_reg(env, reg, regno, - type == PTR_TO_BTF_ID); - if (err < 0) - return err; - break; - } + err = check_func_arg_reg_off(env, reg, regno, arg_type); + if (err) + return err; skip_type_check: if (reg->ref_obj_id) { -- cgit v1.2.3-71-gd317 From 24d5bb806c7e2c0b9972564fd493069f612d90dd Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 5 Mar 2022 04:16:41 +0530 Subject: bpf: Harden register offset checks for release helpers and kfuncs Let's ensure that the PTR_TO_BTF_ID reg being passed in to release BPF helpers and kfuncs always has its offset set to 0. While not a real problem now, there's a very real possibility this will become a problem when more and more kfuncs are exposed, and more BPF helpers are added which can release PTR_TO_BTF_ID. Previous commits already protected against non-zero var_off. One of the cases we are concerned about now is when we have a type that can be returned by e.g. an acquire kfunc: struct foo { int a; int b; struct bar c; }; ... and struct bar is also a type that can be returned by another acquire kfunc. Then, doing the following sequence: struct foo *f = bpf_get_foo(); // acquire kfunc if (!f) return 0; bpf_put_bar(&f->c); // release kfunc ... would work with the current code, since btf_struct_ids_match takes reg->off into account for matching the pointer type with the release kfunc argument type, but it would obviously be incorrect, and most likely lead to a kernel crash. A test has been included later to prevent regressions in this area. Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-5-memxor@gmail.com --- include/linux/bpf_verifier.h | 3 ++- kernel/bpf/btf.c | 33 +++++++++++++++++++-------------- kernel/bpf/verifier.c | 25 ++++++++++++++++++++++--- 3 files changed, 43 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 38b24ee8d8c2..c1fc4af47f69 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -523,7 +523,8 @@ int check_ptr_off_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno); int check_func_arg_reg_off(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno, - enum bpf_arg_type arg_type); + enum bpf_arg_type arg_type, + bool is_release_func); int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno); int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 7f6a0ae5028b..162807e3b4a5 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5753,6 +5753,10 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, return -EINVAL; } + /* Only kfunc can be release func */ + if (is_kfunc) + rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), + BTF_KFUNC_TYPE_RELEASE, func_id); /* check that BTF function arguments match actual types that the * verifier sees.
*/ @@ -5777,7 +5781,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); ref_tname = btf_name_by_offset(btf, ref_t->name_off); - ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE); + ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE, rel); if (ret < 0) return ret; @@ -5809,7 +5813,11 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, if (reg->type == PTR_TO_BTF_ID) { reg_btf = reg->btf; reg_ref_id = reg->btf_id; - /* Ensure only one argument is referenced PTR_TO_BTF_ID */ + /* Ensure only one argument is referenced + * PTR_TO_BTF_ID, check_func_arg_reg_off relies + * on only one referenced register being allowed + * for kfuncs. + */ if (reg->ref_obj_id) { if (ref_obj_id) { bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", @@ -5891,18 +5899,15 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, /* Either both are set, or neither */ WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno)); - if (is_kfunc) { - rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), - BTF_KFUNC_TYPE_RELEASE, func_id); - /* We already made sure ref_obj_id is set only for one argument */ - if (rel && !ref_obj_id) { - bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", - func_name); - return -EINVAL; - } - /* Allow (!rel && ref_obj_id), so that passing such referenced PTR_TO_BTF_ID to - * other kfuncs works - */ + /* We already made sure ref_obj_id is set only for one argument. We do + * allow (!rel && ref_obj_id), so that passing such referenced + * PTR_TO_BTF_ID to other kfuncs works. Note that rel is only true when + * is_kfunc is true. + */ + if (rel && !ref_obj_id) { + bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", + func_name); + return -EINVAL; } /* returns argument register number > 0 in case of reference release kfunc */ return rel ? ref_regno : 0; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 455b4ab69e47..fe9a513e2314 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5367,10 +5367,11 @@ found: int check_func_arg_reg_off(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno, - enum bpf_arg_type arg_type) + enum bpf_arg_type arg_type, + bool is_release_func) { + bool fixed_off_ok = false, release_reg; enum bpf_reg_type type = reg->type; - bool fixed_off_ok = false; switch ((u32)type) { case SCALAR_VALUE: @@ -5395,6 +5396,21 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, * fixed offset. */ case PTR_TO_BTF_ID: + /* When referenced PTR_TO_BTF_ID is passed to release function, + * it's fixed offset must be 0. We rely on the property that + * only one referenced register can be passed to BPF helpers and + * kfuncs. In the other cases, fixed offset can be non-zero. + */ + release_reg = is_release_func && reg->ref_obj_id; + if (release_reg && reg->off) { + verbose(env, "R%d must have zero offset when passed to release func\n", + regno); + return -EINVAL; + } + /* For release_reg == true, fixed_off_ok must be false, but we + * already checked and rejected reg->off != 0 above, so set to + * true to allow fixed offset for all other cases. 
+ */ fixed_off_ok = true; break; default: @@ -5452,11 +5468,14 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, if (err) return err; - err = check_func_arg_reg_off(env, reg, regno, arg_type); + err = check_func_arg_reg_off(env, reg, regno, arg_type, is_release_function(meta->func_id)); if (err) return err; skip_type_check: + /* check_func_arg_reg_off relies on only one referenced register being + * allowed for BPF helpers. + */ if (reg->ref_obj_id) { if (meta->ref_obj_id) { verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", -- cgit v1.2.3-71-gd317 From f014a00bbeb09cea16017b82448d32a468a6b96f Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Sat, 5 Mar 2022 04:16:42 +0530 Subject: compiler-clang.h: Add __diag infrastructure for clang Add __diag macros similar to those in compiler-gcc.h, so that warnings that need to be adjusted for specific cases but not globally can be ignored when building with clang. Signed-off-by: Nathan Chancellor Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-6-memxor@gmail.com [ Kartikeya: wrote commit message ] --- include/linux/compiler-clang.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'include/linux') diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 3c4de9b6c6e3..f1aa41d520bd 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -68,3 +68,25 @@ #define __nocfi __attribute__((__no_sanitize__("cfi"))) #define __cficanonical __attribute__((__cfi_canonical_jump_table__)) + +/* + * Turn individual warnings and errors on and off locally, depending + * on version. + */ +#define __diag_clang(version, severity, s) \ + __diag_clang_ ## version(__diag_clang_ ## severity s) + +/* Severity used in pragma directives */ +#define __diag_clang_ignore ignored +#define __diag_clang_warn warning +#define __diag_clang_error error + +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(clang diagnostic s)) + +#if CONFIG_CLANG_VERSION >= 110000 +#define __diag_clang_11(s) __diag(s) +#else +#define __diag_clang_11(s) +#endif -- cgit v1.2.3-71-gd317 From 4d1ea705d797e66edd70ffa708b83888a210a437 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 5 Mar 2022 04:16:43 +0530 Subject: compiler_types.h: Add unified __diag_ignore_all for GCC/LLVM Add a __diag_ignore_all macro, to ignore warnings for both GCC and LLVM, without having to specify the compiler type and version. By default, GCC 8 and clang 11 are used. This will be used by bpf subsystem to ignore -Wmissing-prototypes warning for functions that are meant to be global functions so that they are in vmlinux BTF, but don't have a prototype. 
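A hedged usage sketch (the wrapper pattern is assumed from the macro's description; bpf_example_kfunc is a hypothetical name): suppressing -Wmissing-prototypes around an intentionally global, prototype-less function on both GCC and clang:

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global kfuncs are only referenced through vmlinux BTF");
/* Deliberately global with no prototype so it lands in vmlinux BTF. */
int bpf_example_kfunc(int arg)
{
	return arg + 1;
}
__diag_pop();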
Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-7-memxor@gmail.com --- include/linux/compiler-clang.h | 3 +++ include/linux/compiler-gcc.h | 3 +++ include/linux/compiler_types.h | 4 ++++ 3 files changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index f1aa41d520bd..babb1347148c 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -90,3 +90,6 @@ #else #define __diag_clang_11(s) #endif + +#define __diag_ignore_all(option, comment) \ + __diag_clang(11, ignore, option) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index ccbbd31b3aae..d364c98a4a80 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -151,6 +151,9 @@ #define __diag_GCC_8(s) #endif +#define __diag_ignore_all(option, comment) \ + __diag_GCC(8, ignore, option) + /* * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size" * attribute) do not work, and must be disabled. diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3f31ff400432..8e5d2f50f951 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -371,4 +371,8 @@ struct ftrace_likely_data { #define __diag_error(compiler, version, option, comment) \ __diag_ ## compiler(version, error, option) +#ifndef __diag_ignore_all +#define __diag_ignore_all(option, comment) +#endif + #endif /* __LINUX_COMPILER_TYPES_H */ -- cgit v1.2.3-71-gd317 From 9216c916237805c93d054ed022afb172ddbc3ed1 Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Fri, 4 Mar 2022 11:16:55 -0800 Subject: compiler_types: Define __percpu as __attribute__((btf_type_tag("percpu"))) This is similar to commit 7472d5a642c9 ("compiler_types: define __user as __attribute__((btf_type_tag("user")))"), where a type tag "user" was introduced to identify the pointers that point to user memory. With that change, the newest compile toolchain can encode __user information into vmlinux BTF, which can be used by the BPF verifier to enforce safe program behaviors. Similarly, we have __percpu attribute, which is mainly used to indicate memory is allocated in percpu region. The __percpu pointers in kernel are supposed to be used together with functions like per_cpu_ptr() and this_cpu_ptr(), which perform necessary calculation on the pointer's base address. Without the btf_type_tag introduced in this patch, __percpu pointers will be treated as regular memory pointers in vmlinux BTF and BPF programs are allowed to directly dereference them, generating incorrect behaviors. Now with "percpu" btf_type_tag, the BPF verifier is able to differentiate __percpu pointers from regular pointers and forbids unexpected behaviors like direct load. The following is an example similar to the one given in commit 7472d5a642c9: [$ ~] cat test.c #define __percpu __attribute__((btf_type_tag("percpu"))) int foo(int __percpu *arg) { return *arg; } [$ ~] clang -O2 -g -c test.c [$ ~] pahole -JV test.o ... File test.o: [1] INT int size=4 nr_bits=32 encoding=SIGNED [2] TYPE_TAG percpu type_id=1 [3] PTR (anon) type_id=2 [4] FUNC_PROTO (anon) return=1 args=(3 arg) [5] FUNC foo type_id=4 [$ ~] for the function argument "int __percpu *arg", its type is described as PTR -> TYPE_TAG(percpu) -> INT The kernel can use this information for bpf verification or other use cases. 
Like commit 7472d5a642c9, this feature requires clang (>= clang14) and pahole (>= 1.23). Signed-off-by: Hao Luo Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220304191657.981240-3-haoluo@google.com --- include/linux/compiler_types.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 8e5d2f50f951..b9a8ae9440c7 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -38,7 +38,12 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __user # endif # define __iomem -# define __percpu +# if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ + __has_attribute(btf_type_tag) +# define __percpu __attribute__((btf_type_tag("percpu"))) +# else +# define __percpu +# endif # define __rcu # define __chk_user_ptr(x) (void)0 # define __chk_io_ptr(x) (void)0 -- cgit v1.2.3-71-gd317 From 5844101a1be9b8636024cb31c865ef13c7cc6db3 Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Fri, 4 Mar 2022 11:16:56 -0800 Subject: bpf: Reject programs that try to load __percpu memory. With the introduction of the btf_type_tag "percpu", we can add a MEM_PERCPU to identify those pointers that point to percpu memory. The ability to differentiate percpu pointers from regular memory pointers has two benefits: 1. It forbids unexpected use of percpu pointers, such as direct loads. In the kernel, there are special functions used for accessing percpu memory. Directly loading percpu memory is meaningless. We already have BPF helpers like bpf_per_cpu_ptr() and bpf_this_cpu_ptr() that wrap the kernel percpu functions. So we can now convert percpu pointers into regular pointers in a safe way. 2. Previously, bpf_per_cpu_ptr() and bpf_this_cpu_ptr() only worked on PTR_TO_PERCPU_BTF_ID, a special reg_type which describes static percpu variables in the kernel (we rely on pahole to encode them into vmlinux BTF). Now, since we can identify __percpu tagged pointers, we can identify dynamically allocated percpu memory as well. It means we can use bpf_xxx_cpu_ptr() on dynamic percpu memory. This would be very convenient when accessing fields like "cgroup->rstat_cpu". Signed-off-by: Hao Luo Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220304191657.981240-4-haoluo@google.com --- include/linux/bpf.h | 11 +++++++++-- kernel/bpf/btf.c | 8 +++++++- kernel/bpf/verifier.c | 24 ++++++++++++++---------- 3 files changed, 30 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f19abc59b6cd..88449fbbe063 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -334,7 +334,15 @@ enum bpf_type_flag { /* MEM is in user address space. */ MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS), - __BPF_TYPE_LAST_FLAG = MEM_USER, + /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged + * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In + * order to drop this tag, it must be passed into bpf_per_cpu_ptr() + * or bpf_this_cpu_ptr(), which will return the pointer corresponding + * to the specified cpu. + */ + MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS), + + __BPF_TYPE_LAST_FLAG = MEM_PERCPU, }; /* Max number of base types.
*/ @@ -516,7 +524,6 @@ enum bpf_reg_type { */ PTR_TO_MEM, /* reg points to valid memory region */ PTR_TO_BUF, /* reg points to a read/write buffer */ - PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ PTR_TO_FUNC, /* reg points to a bpf program function */ __BPF_REG_TYPE_MAX, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 162807e3b4a5..8b34563a832e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5057,6 +5057,8 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, tag_value = __btf_name_by_offset(btf, t->name_off); if (strcmp(tag_value, "user") == 0) info->reg_type |= MEM_USER; + if (strcmp(tag_value, "percpu") == 0) + info->reg_type |= MEM_PERCPU; } /* skip modifiers */ @@ -5285,12 +5287,16 @@ error: return -EACCES; } - /* check __user tag */ + /* check type tag */ t = btf_type_by_id(btf, mtype->type); if (btf_type_is_type_tag(t)) { tag_value = __btf_name_by_offset(btf, t->name_off); + /* check __user tag */ if (strcmp(tag_value, "user") == 0) tmp_flag = MEM_USER; + /* check __percpu tag */ + if (strcmp(tag_value, "percpu") == 0) + tmp_flag = MEM_PERCPU; } stype = btf_type_skip_modifiers(btf, mtype->type, &id); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7a6b58fea37d..ec3a7b6c9515 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -554,7 +554,6 @@ static const char *reg_type_str(struct bpf_verifier_env *env, [PTR_TO_TP_BUFFER] = "tp_buffer", [PTR_TO_XDP_SOCK] = "xdp_sock", [PTR_TO_BTF_ID] = "ptr_", - [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_", [PTR_TO_MEM] = "mem", [PTR_TO_BUF] = "buf", [PTR_TO_FUNC] = "func", @@ -562,8 +561,7 @@ static const char *reg_type_str(struct bpf_verifier_env *env, }; if (type & PTR_MAYBE_NULL) { - if (base_type(type) == PTR_TO_BTF_ID || - base_type(type) == PTR_TO_PERCPU_BTF_ID) + if (base_type(type) == PTR_TO_BTF_ID) strncpy(postfix, "or_null_", 16); else strncpy(postfix, "_or_null", 16); @@ -575,6 +573,8 @@ static const char *reg_type_str(struct bpf_verifier_env *env, strncpy(prefix, "alloc_", 32); if (type & MEM_USER) strncpy(prefix, "user_", 32); + if (type & MEM_PERCPU) + strncpy(prefix, "percpu_", 32); snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s", prefix, str[base_type(type)], postfix); @@ -697,8 +697,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, const char *sep = ""; verbose(env, "%s", reg_type_str(env, t)); - if (base_type(t) == PTR_TO_BTF_ID || - base_type(t) == PTR_TO_PERCPU_BTF_ID) + if (base_type(t) == PTR_TO_BTF_ID) verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id)); verbose(env, "("); /* @@ -2783,7 +2782,6 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_XDP_SOCK: case PTR_TO_BTF_ID: case PTR_TO_BUF: - case PTR_TO_PERCPU_BTF_ID: case PTR_TO_MEM: case PTR_TO_FUNC: case PTR_TO_MAP_KEY: @@ -4203,6 +4201,13 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, return -EACCES; } + if (reg->type & MEM_PERCPU) { + verbose(env, + "R%d is ptr_%s access percpu memory: off=%d\n", + regno, tname, off); + return -EACCES; + } + if (env->ops->btf_struct_access) { ret = env->ops->btf_struct_access(&env->log, reg->btf, t, off, size, atype, &btf_id, &flag); @@ -4809,7 +4814,7 @@ static int check_stack_range_initialized( } if (is_spilled_reg(&state->stack[spi]) && - state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) + base_type(state->stack[spi].spilled_ptr.type) == PTR_TO_BTF_ID) goto mark; if (is_spilled_reg(&state->stack[spi]) && @@ -5265,7 +5270,7 @@ static const struct bpf_reg_types alloc_mem_types = { 
.types = { PTR_TO_MEM | ME static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } }; static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } }; -static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PERCPU_BTF_ID } }; +static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_BTF_ID | MEM_PERCPU } }; static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; @@ -9677,7 +9682,6 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) dst_reg->mem_size = aux->btf_var.mem_size; break; case PTR_TO_BTF_ID: - case PTR_TO_PERCPU_BTF_ID: dst_reg->btf = aux->btf_var.btf; dst_reg->btf_id = aux->btf_var.btf_id; break; @@ -11877,7 +11881,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env, type = t->type; t = btf_type_skip_modifiers(btf, type, NULL); if (percpu) { - aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID; + aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; aux->btf_var.btf = btf; aux->btf_var.btf_id = type; } else if (!btf_type_is_struct(t)) { -- cgit v1.2.3-71-gd317 From 736f16de75f9bb32d76f652cb66f04d1bc685057 Mon Sep 17 00:00:00 2001 From: Dongli Zhang Date: Fri, 4 Mar 2022 06:55:05 -0800 Subject: net: tap: track dropped skb via kfree_skb_reason() The TAP device can be used as a vhost-net backend. E.g., tap_handle_frame() is the interface that forwards the skb from TAP to vhost-net/virtio-net. However, there are many "goto drop" paths in the TAP driver. Therefore, kfree_skb_reason() is invoked at each "goto drop" to help userspace ftrace/eBPF track the reason for the packet loss. The following drop reasons are introduced: - SKB_DROP_REASON_SKB_CSUM - SKB_DROP_REASON_SKB_GSO_SEG - SKB_DROP_REASON_SKB_UCOPY_FAULT - SKB_DROP_REASON_DEV_HDR - SKB_DROP_REASON_FULL_RING Cc: Joao Martins Cc: Joe Jin Signed-off-by: Dongli Zhang Signed-off-by: David S.
Miller --- drivers/net/tap.c | 35 +++++++++++++++++++++++++---------- include/linux/skbuff.h | 13 +++++++++++++ include/trace/events/skb.h | 5 +++++ 3 files changed, 43 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/tap.c b/drivers/net/tap.c index ba2ef5437e16..c3d42062559d 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -322,6 +322,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) struct tap_dev *tap; struct tap_queue *q; netdev_features_t features = TAP_FEATURES; + enum skb_drop_reason drop_reason; tap = tap_dev_get_rcu(dev); if (!tap) @@ -343,12 +344,16 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) struct sk_buff *segs = __skb_gso_segment(skb, features, false); struct sk_buff *next; - if (IS_ERR(segs)) + if (IS_ERR(segs)) { + drop_reason = SKB_DROP_REASON_SKB_GSO_SEG; goto drop; + } if (!segs) { - if (ptr_ring_produce(&q->ring, skb)) + if (ptr_ring_produce(&q->ring, skb)) { + drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; + } goto wake_up; } @@ -356,8 +361,9 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) skb_list_walk_safe(segs, skb, next) { skb_mark_not_on_list(skb); if (ptr_ring_produce(&q->ring, skb)) { - kfree_skb(skb); - kfree_skb_list(next); + drop_reason = SKB_DROP_REASON_FULL_RING; + kfree_skb_reason(skb, drop_reason); + kfree_skb_list_reason(next, drop_reason); break; } } @@ -369,10 +375,14 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) */ if (skb->ip_summed == CHECKSUM_PARTIAL && !(features & NETIF_F_CSUM_MASK) && - skb_checksum_help(skb)) + skb_checksum_help(skb)) { + drop_reason = SKB_DROP_REASON_SKB_CSUM; goto drop; - if (ptr_ring_produce(&q->ring, skb)) + } + if (ptr_ring_produce(&q->ring, skb)) { + drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; + } } wake_up: @@ -383,7 +393,7 @@ drop: /* Count errors/drops only here, thus don't care about args. 
*/ if (tap->count_rx_dropped) tap->count_rx_dropped(tap); - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); return RX_HANDLER_CONSUMED; } EXPORT_SYMBOL_GPL(tap_handle_frame); @@ -632,6 +642,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, int depth; bool zerocopy = false; size_t linear; + enum skb_drop_reason drop_reason; if (q->flags & IFF_VNET_HDR) { vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); @@ -696,8 +707,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, else err = skb_copy_datagram_from_iter(skb, 0, from, len); - if (err) + if (err) { + drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto err_kfree; + } skb_set_network_header(skb, ETH_HLEN); skb_reset_mac_header(skb); @@ -706,8 +719,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, if (vnet_hdr_len) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, tap_is_little_endian(q)); - if (err) + if (err) { + drop_reason = SKB_DROP_REASON_DEV_HDR; goto err_kfree; + } } skb_probe_transport_header(skb); @@ -738,7 +753,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, return total_len; err_kfree: - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); err: rcu_read_lock(); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2be263184d1e..67cfff4065b6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -412,6 +412,19 @@ enum skb_drop_reason { * this means that L3 protocol is * not supported */ + SKB_DROP_REASON_SKB_CSUM, /* sk_buff checksum computation + * error + */ + SKB_DROP_REASON_SKB_GSO_SEG, /* gso segmentation error */ + SKB_DROP_REASON_SKB_UCOPY_FAULT, /* failed to copy data from + * user space, e.g., via + * zerocopy_sg_from_iter() + * or skb_orphan_frags_rx() + */ + SKB_DROP_REASON_DEV_HDR, /* device driver specific + * header/metadata is invalid + */ + SKB_DROP_REASON_FULL_RING, /* ring buffer is full */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index c0769d943f8e..240e7e7591fc 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -51,6 +51,11 @@ EM(SKB_DROP_REASON_XDP, XDP) \ EM(SKB_DROP_REASON_TC_INGRESS, TC_INGRESS) \ EM(SKB_DROP_REASON_PTYPE_ABSENT, PTYPE_ABSENT) \ + EM(SKB_DROP_REASON_SKB_CSUM, SKB_CSUM) \ + EM(SKB_DROP_REASON_SKB_GSO_SEG, SKB_GSO_SEG) \ + EM(SKB_DROP_REASON_SKB_UCOPY_FAULT, SKB_UCOPY_FAULT) \ + EM(SKB_DROP_REASON_DEV_HDR, DEV_HDR) \ + EM(SKB_DROP_REASON_FULL_RING, FULL_RING) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM -- cgit v1.2.3-71-gd317 From 4b4f052e2d89c2eb7e13ee28ba9e85f8097aef3d Mon Sep 17 00:00:00 2001 From: Dongli Zhang Date: Fri, 4 Mar 2022 06:55:07 -0800 Subject: net: tun: track dropped skb via kfree_skb_reason() The TUN device can be used as a vhost-net backend. E.g., tun_net_xmit() is the interface that forwards the skb from TUN to vhost-net/virtio-net. However, there are many "goto drop" paths in the TUN driver. Therefore, kfree_skb_reason() is invoked at each "goto drop" to help userspace ftrace/eBPF track the reason for the packet loss. The following drop reasons are introduced: - SKB_DROP_REASON_DEV_READY - SKB_DROP_REASON_NOMEM - SKB_DROP_REASON_HDR_TRUNC - SKB_DROP_REASON_TAP_FILTER - SKB_DROP_REASON_TAP_TXFILTER Cc: Joao Martins Cc: Joe Jin Signed-off-by: Dongli Zhang Signed-off-by: David S.
Miller --- drivers/net/tun.c | 37 ++++++++++++++++++++++++++++--------- include/linux/skbuff.h | 18 ++++++++++++++++++ include/trace/events/skb.h | 5 +++++ 3 files changed, 51 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 6e06c846fe82..bab92e489fba 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1058,6 +1058,7 @@ static unsigned int run_ebpf_filter(struct tun_struct *tun, static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); + enum skb_drop_reason drop_reason; int txq = skb->queue_mapping; struct netdev_queue *queue; struct tun_file *tfile; @@ -1067,8 +1068,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) tfile = rcu_dereference(tun->tfiles[txq]); /* Drop packet if interface is not attached */ - if (!tfile) + if (!tfile) { + drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; + } if (!rcu_dereference(tun->steering_prog)) tun_automq_xmit(tun, skb); @@ -1078,22 +1081,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) /* Drop if the filter does not like it. * This is a noop if the filter is disabled. * Filter can be enabled only for the TAP devices. */ - if (!check_filter(&tun->txflt, skb)) + if (!check_filter(&tun->txflt, skb)) { + drop_reason = SKB_DROP_REASON_TAP_TXFILTER; goto drop; + } if (tfile->socket.sk->sk_filter && - sk_filter(tfile->socket.sk, skb)) + sk_filter(tfile->socket.sk, skb)) { + drop_reason = SKB_DROP_REASON_SOCKET_FILTER; goto drop; + } len = run_ebpf_filter(tun, skb, len); - if (len == 0) + if (len == 0) { + drop_reason = SKB_DROP_REASON_TAP_FILTER; goto drop; + } - if (pskb_trim(skb, len)) + if (pskb_trim(skb, len)) { + drop_reason = SKB_DROP_REASON_NOMEM; goto drop; + } - if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) + if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) { + drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; + } skb_tx_timestamp(skb); @@ -1104,8 +1117,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset_ct(skb); - if (ptr_ring_produce(&tfile->tx_ring, skb)) + if (ptr_ring_produce(&tfile->tx_ring, skb)) { + drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; + } /* NETIF_F_LLTX requires to do our own update of trans_start */ queue = netdev_get_tx_queue(dev, txq); @@ -1122,7 +1137,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) drop: atomic_long_inc(&dev->tx_dropped); skb_tx_error(skb); - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); rcu_read_unlock(); return NET_XMIT_DROP; } @@ -1720,6 +1735,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, u32 rxhash = 0; int skb_xdp = 1; bool frags = tun_napi_frags_enabled(tfile); + enum skb_drop_reason drop_reason; if (!(tun->flags & IFF_NO_PI)) { if (len < sizeof(pi)) @@ -1823,9 +1839,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (err) { err = -EFAULT; + drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; drop: atomic_long_inc(&tun->dev->rx_dropped); - kfree_skb(skb); + kfree_skb_reason(skb, drop_reason); if (frags) { tfile->napi.skb = NULL; mutex_unlock(&tfile->napi_mutex); @@ -1872,6 +1889,7 @@ drop: case IFF_TAP: if (frags && !pskb_may_pull(skb, ETH_HLEN)) { err = -ENOMEM; + drop_reason = SKB_DROP_REASON_HDR_TRUNC; goto drop; } skb->protocol = eth_type_trans(skb, tun->dev); @@ -1925,6 +1943,7 @@ drop: if (unlikely(!(tun->dev->flags & IFF_UP))) { 
err = -EIO; rcu_read_unlock(); + drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 67cfff4065b6..34f572271c0c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -424,7 +424,25 @@ enum skb_drop_reason { SKB_DROP_REASON_DEV_HDR, /* device driver specific * header/metadata is invalid */ + /* the device is not ready to xmit/recv due to any of its data + * structure that is not up/ready/initialized, e.g., the IFF_UP is + * not set, or driver specific tun->tfiles[txq] is not initialized + */ + SKB_DROP_REASON_DEV_READY, SKB_DROP_REASON_FULL_RING, /* ring buffer is full */ + SKB_DROP_REASON_NOMEM, /* error due to OOM */ + SKB_DROP_REASON_HDR_TRUNC, /* failed to trunc/extract the header + * from networking data, e.g., failed + * to pull the protocol header from + * frags via pskb_may_pull() + */ + SKB_DROP_REASON_TAP_FILTER, /* dropped by (ebpf) filter directly + * attached to tun/tap, e.g., via + * TUNSETFILTEREBPF + */ + SKB_DROP_REASON_TAP_TXFILTER, /* dropped by tx filter implemented + * at tun/tap, e.g., check_filter() + */ SKB_DROP_REASON_MAX, }; diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 240e7e7591fc..e1670e1e4934 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -55,7 +55,12 @@ EM(SKB_DROP_REASON_SKB_GSO_SEG, SKB_GSO_SEG) \ EM(SKB_DROP_REASON_SKB_UCOPY_FAULT, SKB_UCOPY_FAULT) \ EM(SKB_DROP_REASON_DEV_HDR, DEV_HDR) \ + EM(SKB_DROP_REASON_DEV_READY, DEV_READY) \ EM(SKB_DROP_REASON_FULL_RING, FULL_RING) \ + EM(SKB_DROP_REASON_NOMEM, NOMEM) \ + EM(SKB_DROP_REASON_HDR_TRUNC, HDR_TRUNC) \ + EM(SKB_DROP_REASON_TAP_FILTER, TAP_FILTER) \ + EM(SKB_DROP_REASON_TAP_TXFILTER, TAP_TXFILTER) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM -- cgit v1.2.3-71-gd317 From f72de02ebece2e962462bc0c1e9efd29eaa029b2 Mon Sep 17 00:00:00 2001 From: Kurt Kanzenbach Date: Sat, 5 Mar 2022 12:21:25 +0100 Subject: ptp: Add generic PTP is_sync() function PHY drivers such as micrel or dp83640 need to analyze whether a given skb is a PTP sync message for one step functionality. In order to avoid code duplication introduce a generic function and move it to ptp classify. Signed-off-by: Kurt Kanzenbach Signed-off-by: David S. Miller --- include/linux/ptp_classify.h | 15 +++++++++++++++ net/core/ptp_classifier.c | 12 ++++++++++++ 2 files changed, 27 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index 9afd34a2d36c..fefa7790dc46 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -126,6 +126,17 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr, return msgtype; } +/** + * ptp_msg_is_sync - Evaluates whether the given skb is a PTP Sync message + * @skb: packet buffer + * @type: type of the packet (see ptp_classify_raw()) + * + * This function evaluates whether the given skb is a PTP Sync message. 
+ * + * Return: true if sync message, false otherwise + */ +bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type); + void __init ptp_classifier_init(void); #else static inline void ptp_classifier_init(void) @@ -148,5 +159,9 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr, */ return PTP_MSGTYPE_SYNC; } +static inline bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type) +{ + return false; +} #endif #endif /* _PTP_CLASSIFY_H_ */ diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c index dd4cf01d1e0a..598041b0499e 100644 --- a/net/core/ptp_classifier.c +++ b/net/core/ptp_classifier.c @@ -137,6 +137,18 @@ struct ptp_header *ptp_parse_header(struct sk_buff *skb, unsigned int type) } EXPORT_SYMBOL_GPL(ptp_parse_header); +bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type) +{ + struct ptp_header *hdr; + + hdr = ptp_parse_header(skb, type); + if (!hdr) + return false; + + return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC; +} +EXPORT_SYMBOL_GPL(ptp_msg_is_sync); + void __init ptp_classifier_init(void) { static struct sock_filter ptp_filter[] __initdata = { -- cgit v1.2.3-71-gd317 From 2655926aea9beea62c9ba80c032485456fd848f0 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sun, 6 Mar 2022 22:57:52 +0100 Subject: net: Remove netif_rx_any_context() and netif_rx_ni(). Remove netif_rx_any_context and netif_rx_ni() because there are no more users in tree. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. Miller --- include/linux/netdevice.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 19a27ac361ef..29a850a8d460 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3718,16 +3718,6 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); int netif_rx(struct sk_buff *skb); int __netif_rx(struct sk_buff *skb); -static inline int netif_rx_ni(struct sk_buff *skb) -{ - return netif_rx(skb); -} - -static inline int netif_rx_any_context(struct sk_buff *skb) -{ - return netif_rx(skb); -} - int netif_receive_skb(struct sk_buff *skb); int netif_receive_skb_core(struct sk_buff *skb); void netif_receive_skb_list_internal(struct list_head *head); -- cgit v1.2.3-71-gd317 From 23c7f8d7989e1646aac82f75761b7648c355cb8a Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Mon, 7 Mar 2022 13:11:41 +0100 Subject: net: Fix esp GSO on inter address family tunnels. The esp tunnel GSO handlers use skb_mac_gso_segment to push the inner packet to the segmentation handlers. However, skb_mac_gso_segment takes the Ethernet Protocol ID from 'skb->protocol' which is wrong for inter address family tunnels. We fix this by introducing a new skb_eth_gso_segment function. This function can be used if it is necessary to pass the Ethernet Protocol ID directly to the segmentation handler. First users of this function will be the esp4 and esp6 tunnel segmentation handlers. 
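To make the intended call pattern concrete, here is a hedged sketch of an inter address family case; the handler name is illustrative and not part of this patch (only the same-family esp4/esp6 conversions below are). An IPv4 ESP tunnel carrying IPv6 inner packets must pass the inner protocol explicitly, since skb->protocol still says ETH_P_IP:

/* Hypothetical IPv6-in-IPv4 tunnel handler: skb_mac_gso_segment()
 * would look up the offload callbacks via skb->protocol (ETH_P_IP)
 * and mis-segment the inner IPv6 packets.
 */
static struct sk_buff *xfrm4_tunnel6_gso_segment(struct xfrm_state *x,
						 struct sk_buff *skb,
						 netdev_features_t features)
{
	return skb_eth_gso_segment(skb, features, htons(ETH_P_IPV6));
}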
Fixes: c35fe4106b92 ("xfrm: Add mode handlers for IPsec on layer 2") Signed-off-by: Steffen Klassert --- include/linux/netdevice.h | 2 ++ net/core/gro.c | 25 +++++++++++++++++++++++++ net/ipv4/esp4_offload.c | 3 +-- net/ipv6/esp6_offload.c | 3 +-- 4 files changed, 29 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8b5a314db167..f53ea7038441 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4602,6 +4602,8 @@ int skb_csum_hwoffload_help(struct sk_buff *skb, struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); +struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb, + netdev_features_t features, __be16 type); struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, netdev_features_t features); diff --git a/net/core/gro.c b/net/core/gro.c index a11b286d1495..b7d2b0dc59a2 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -92,6 +92,31 @@ void dev_remove_offload(struct packet_offload *po) } EXPORT_SYMBOL(dev_remove_offload); +/** + * skb_eth_gso_segment - segmentation handler for ethernet protocols. + * @skb: buffer to segment + * @features: features for the output path (see dev->features) + * @type: Ethernet Protocol ID + */ +struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb, + netdev_features_t features, __be16 type) +{ + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); + struct packet_offload *ptype; + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, &offload_base, list) { + if (ptype->type == type && ptype->callbacks.gso_segment) { + segs = ptype->callbacks.gso_segment(skb, features); + break; + } + } + rcu_read_unlock(); + + return segs; +} +EXPORT_SYMBOL(skb_eth_gso_segment); + /** * skb_mac_gso_segment - mac layer segmentation handler. * @skb: buffer to segment diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 146d4d54830c..935026f4c807 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -110,8 +110,7 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features) { - __skb_push(skb, skb->mac_len); - return skb_mac_gso_segment(skb, features); + return skb_eth_gso_segment(skb, features, htons(ETH_P_IP)); } static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x, diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index e61172d50817..3a293838a91d 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -145,8 +145,7 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features) { - __skb_push(skb, skb->mac_len); - return skb_mac_gso_segment(skb, features); + return skb_eth_gso_segment(skb, features, htons(ETH_P_IPV6)); } static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x, -- cgit v1.2.3-71-gd317 From 97939610b893de068c82c347d06319cd231a4602 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 4 Mar 2022 19:01:05 +0100 Subject: block: remove bio_devname All callers are gone, so remove this wrapper. 
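The callers had already been converted to format the block device directly; a minimal sketch of the replacement idiom, assuming the %pg printk specifier (which prints a struct block_device * by name, partition included):

/* Before:
 *	char b[BDEVNAME_SIZE];
 *	pr_err("I/O error on %s\n", bio_devname(bio, b));
 */
pr_err("I/O error on %pg\n", bio->bi_bdev);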
Signed-off-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20220304180105.409765-11-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 6 ------ include/linux/bio.h | 2 -- 2 files changed, 8 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index b15f5466ce08..151cace2dbe1 100644 --- a/block/bio.c +++ b/block/bio.c @@ -807,12 +807,6 @@ int bio_init_clone(struct block_device *bdev, struct bio *bio, } EXPORT_SYMBOL(bio_init_clone); -const char *bio_devname(struct bio *bio, char *buf) -{ - return bdevname(bio->bi_bdev, buf); -} -EXPORT_SYMBOL(bio_devname); - /** * bio_full - check if the bio is full * @bio: bio to check diff --git a/include/linux/bio.h b/include/linux/bio.h index 7523aba4ddf7..4c21f6e69e18 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -491,8 +491,6 @@ static inline void bio_release_pages(struct bio *bio, bool mark_dirty) __bio_release_pages(bio, mark_dirty); } -extern const char *bio_devname(struct bio *bio, char *buffer); - #define bio_dev(bio) \ disk_devt((bio)->bi_bdev->bd_disk) -- cgit v1.2.3-71-gd317 From a26d84633c2ba3c41fd7692986fb2231c9228c4e Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Wed, 23 Feb 2022 18:59:02 +0100 Subject: rtc: max77686: Rename day-of-month defines RTC_DATE and REG_RTC_DATE are used for the registers holding the day of month. Rename these constants to mean what they mean. Signed-off-by: Luca Ceresoli Reviewed-by: Krzysztof Kozlowski Acked-by: Alexandre Belloni Signed-off-by: Lee Jones --- drivers/mfd/max77686.c | 2 +- drivers/rtc/rtc-max77686.c | 16 ++++++++-------- include/linux/mfd/max77686-private.h | 4 ++-- 3 files changed, 11 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c index f9e12ab2bc75..2ac64277fb84 100644 --- a/drivers/mfd/max77686.c +++ b/drivers/mfd/max77686.c @@ -87,7 +87,7 @@ static bool max77802_rtc_is_volatile_reg(struct device *dev, unsigned int reg) reg == MAX77802_RTC_WEEKDAY || reg == MAX77802_RTC_MONTH || reg == MAX77802_RTC_YEAR || - reg == MAX77802_RTC_DATE); + reg == MAX77802_RTC_MONTHDAY); } static bool max77802_is_volatile_reg(struct device *dev, unsigned int reg) diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c index bac52cdea97d..7e765207f28e 100644 --- a/drivers/rtc/rtc-max77686.c +++ b/drivers/rtc/rtc-max77686.c @@ -57,7 +57,7 @@ enum { RTC_WEEKDAY, RTC_MONTH, RTC_YEAR, - RTC_DATE, + RTC_MONTHDAY, RTC_NR_TIME }; @@ -119,7 +119,7 @@ enum max77686_rtc_reg_offset { REG_RTC_WEEKDAY, REG_RTC_MONTH, REG_RTC_YEAR, - REG_RTC_DATE, + REG_RTC_MONTHDAY, REG_ALARM1_SEC, REG_ALARM1_MIN, REG_ALARM1_HOUR, @@ -150,7 +150,7 @@ static const unsigned int max77686_map[REG_RTC_END] = { [REG_RTC_WEEKDAY] = MAX77686_RTC_WEEKDAY, [REG_RTC_MONTH] = MAX77686_RTC_MONTH, [REG_RTC_YEAR] = MAX77686_RTC_YEAR, - [REG_RTC_DATE] = MAX77686_RTC_DATE, + [REG_RTC_MONTHDAY] = MAX77686_RTC_MONTHDAY, [REG_ALARM1_SEC] = MAX77686_ALARM1_SEC, [REG_ALARM1_MIN] = MAX77686_ALARM1_MIN, [REG_ALARM1_HOUR] = MAX77686_ALARM1_HOUR, @@ -233,7 +233,7 @@ static const unsigned int max77802_map[REG_RTC_END] = { [REG_RTC_WEEKDAY] = MAX77802_RTC_WEEKDAY, [REG_RTC_MONTH] = MAX77802_RTC_MONTH, [REG_RTC_YEAR] = MAX77802_RTC_YEAR, - [REG_RTC_DATE] = MAX77802_RTC_DATE, + [REG_RTC_MONTHDAY] = MAX77802_RTC_MONTHDAY, [REG_ALARM1_SEC] = MAX77802_ALARM1_SEC, [REG_ALARM1_MIN] = MAX77802_ALARM1_MIN, [REG_ALARM1_HOUR] = MAX77802_ALARM1_HOUR, @@ -288,7 +288,7 @@ static 
void max77686_rtc_data_to_tm(u8 *data, struct rtc_time *tm, /* Only a single bit is set in data[], so fls() would be equivalent */ tm->tm_wday = ffs(data[RTC_WEEKDAY] & mask) - 1; - tm->tm_mday = data[RTC_DATE] & 0x1f; + tm->tm_mday = data[RTC_MONTHDAY] & 0x1f; tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1; tm->tm_year = data[RTC_YEAR] & mask; tm->tm_yday = 0; @@ -309,7 +309,7 @@ static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data, data[RTC_MIN] = tm->tm_min; data[RTC_HOUR] = tm->tm_hour; data[RTC_WEEKDAY] = 1 << tm->tm_wday; - data[RTC_DATE] = tm->tm_mday; + data[RTC_MONTHDAY] = tm->tm_mday; data[RTC_MONTH] = tm->tm_mon + 1; if (info->drv_data->alarm_enable_reg) { @@ -565,8 +565,8 @@ static int max77686_rtc_start_alarm(struct max77686_rtc_info *info) data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT); if (data[RTC_YEAR] & info->drv_data->mask) data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT); - if (data[RTC_DATE] & 0x1f) - data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT); + if (data[RTC_MONTHDAY] & 0x1f) + data[RTC_MONTHDAY] |= (1 << ALARM_ENABLE_SHIFT); ret = regmap_bulk_write(info->rtc_regmap, map[REG_ALARM1_SEC], data, ARRAY_SIZE(data)); diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h index b1482b3cf353..3acceeedbaba 100644 --- a/include/linux/mfd/max77686-private.h +++ b/include/linux/mfd/max77686-private.h @@ -152,7 +152,7 @@ enum max77686_rtc_reg { MAX77686_RTC_WEEKDAY = 0x0A, MAX77686_RTC_MONTH = 0x0B, MAX77686_RTC_YEAR = 0x0C, - MAX77686_RTC_DATE = 0x0D, + MAX77686_RTC_MONTHDAY = 0x0D, MAX77686_ALARM1_SEC = 0x0E, MAX77686_ALARM1_MIN = 0x0F, MAX77686_ALARM1_HOUR = 0x10, @@ -352,7 +352,7 @@ enum max77802_rtc_reg { MAX77802_RTC_WEEKDAY = 0xCA, MAX77802_RTC_MONTH = 0xCB, MAX77802_RTC_YEAR = 0xCC, - MAX77802_RTC_DATE = 0xCD, + MAX77802_RTC_MONTHDAY = 0xCD, MAX77802_RTC_AE1 = 0xCE, MAX77802_ALARM1_SEC = 0xCF, MAX77802_ALARM1_MIN = 0xD0, -- cgit v1.2.3-71-gd317 From 60b050ff3a60273d56b4017d0f45d5ca71aae5ad Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Wed, 23 Feb 2022 18:59:05 +0100 Subject: mfd: max77714: Add driver for Maxim MAX77714 PMIC Add a simple driver for the Maxim MAX77714 PMIC, supporting RTC and watchdog only. Signed-off-by: Luca Ceresoli Reviewed-by: Krzysztof Kozlowski Signed-off-by: Lee Jones --- MAINTAINERS | 2 + drivers/mfd/Kconfig | 14 ++++ drivers/mfd/Makefile | 1 + drivers/mfd/max77714.c | 152 +++++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/max77714.h | 60 +++++++++++++++++ 5 files changed, 229 insertions(+) create mode 100644 drivers/mfd/max77714.c create mode 100644 include/linux/mfd/max77714.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index 4cb0e77b47ef..6b7c58e7ce84 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11695,6 +11695,8 @@ MAXIM MAX77714 PMIC MFD DRIVER M: Luca Ceresoli S: Maintained F: Documentation/devicetree/bindings/mfd/maxim,max77714.yaml +F: drivers/mfd/max77714.c +F: include/linux/mfd/max77714.h MAXIM MAX77802 PMIC REGULATOR DEVICE DRIVER M: Javier Martinez Canillas diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index ba0b3eb131f1..036ddf68c814 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -849,6 +849,20 @@ config MFD_MAX77693 additional drivers must be enabled in order to use the functionality of the device. +config MFD_MAX77714 + tristate "Maxim Semiconductor MAX77714 PMIC Support" + depends on I2C + depends on OF || COMPILE_TEST + select MFD_CORE + select REGMAP_I2C + help + Say yes here to add support for Maxim Semiconductor MAX77714. 
+ This is a Power Management IC with 4 buck regulators, 9 + low-dropout regulators, 8 GPIOs, RTC, watchdog etc. This driver + provides common support for accessing the device; additional + drivers must be enabled in order to use each functionality of the + device. + config MFD_MAX77843 bool "Maxim Semiconductor MAX77843 PMIC Support" depends on I2C=y diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index df1ecc4a4c95..eea70cb9f9b9 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -162,6 +162,7 @@ obj-$(CONFIG_MFD_MAX77620) += max77620.o obj-$(CONFIG_MFD_MAX77650) += max77650.o obj-$(CONFIG_MFD_MAX77686) += max77686.o obj-$(CONFIG_MFD_MAX77693) += max77693.o +obj-$(CONFIG_MFD_MAX77714) += max77714.o obj-$(CONFIG_MFD_MAX77843) += max77843.o obj-$(CONFIG_MFD_MAX8907) += max8907.o max8925-objs := max8925-core.o max8925-i2c.o diff --git a/drivers/mfd/max77714.c b/drivers/mfd/max77714.c new file mode 100644 index 000000000000..d1e4247800d2 --- /dev/null +++ b/drivers/mfd/max77714.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Maxim MAX77714 Core Driver + * + * Copyright (C) 2022 Luca Ceresoli + * Author: Luca Ceresoli + */ + +#include +#include +#include +#include +#include +#include +#include + +static const struct mfd_cell max77714_cells[] = { + { .name = "max77714-watchdog" }, + { .name = "max77714-rtc" }, +}; + +static const struct regmap_range max77714_readable_ranges[] = { + regmap_reg_range(MAX77714_INT_TOP, MAX77714_INT_TOP), + regmap_reg_range(MAX77714_INT_TOPM, MAX77714_INT_TOPM), + regmap_reg_range(MAX77714_32K_STATUS, MAX77714_32K_CONFIG), + regmap_reg_range(MAX77714_CNFG_GLBL2, MAX77714_CNFG2_ONOFF), +}; + +static const struct regmap_range max77714_writable_ranges[] = { + regmap_reg_range(MAX77714_INT_TOPM, MAX77714_INT_TOPM), + regmap_reg_range(MAX77714_32K_CONFIG, MAX77714_32K_CONFIG), + regmap_reg_range(MAX77714_CNFG_GLBL2, MAX77714_CNFG2_ONOFF), +}; + +static const struct regmap_access_table max77714_readable_table = { + .yes_ranges = max77714_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(max77714_readable_ranges), +}; + +static const struct regmap_access_table max77714_writable_table = { + .yes_ranges = max77714_writable_ranges, + .n_yes_ranges = ARRAY_SIZE(max77714_writable_ranges), +}; + +static const struct regmap_config max77714_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = MAX77714_CNFG2_ONOFF, + .rd_table = &max77714_readable_table, + .wr_table = &max77714_writable_table, +}; + +static const struct regmap_irq max77714_top_irqs[] = { + REGMAP_IRQ_REG(MAX77714_IRQ_TOP_ONOFF, 0, MAX77714_INT_TOP_ONOFF), + REGMAP_IRQ_REG(MAX77714_IRQ_TOP_RTC, 0, MAX77714_INT_TOP_RTC), + REGMAP_IRQ_REG(MAX77714_IRQ_TOP_GPIO, 0, MAX77714_INT_TOP_GPIO), + REGMAP_IRQ_REG(MAX77714_IRQ_TOP_LDO, 0, MAX77714_INT_TOP_LDO), + REGMAP_IRQ_REG(MAX77714_IRQ_TOP_SD, 0, MAX77714_INT_TOP_SD), + REGMAP_IRQ_REG(MAX77714_IRQ_TOP_GLBL, 0, MAX77714_INT_TOP_GLBL), +}; + +static const struct regmap_irq_chip max77714_irq_chip = { + .name = "max77714-pmic", + .status_base = MAX77714_INT_TOP, + .mask_base = MAX77714_INT_TOPM, + .num_regs = 1, + .irqs = max77714_top_irqs, + .num_irqs = ARRAY_SIZE(max77714_top_irqs), +}; + +/* + * MAX77714 initially uses the internal, low precision oscillator. Enable + * the external oscillator by setting the XOSC_RETRY bit. If the external + * oscillator is not OK (probably not installed) this has no effect. 
+ */ +static int max77714_setup_xosc(struct device *dev, struct regmap *regmap) +{ + /* Internal Crystal Load Capacitance, indexed by value of 32KLOAD bits */ + static const unsigned int load_cap[4] = {0, 10, 12, 22}; /* pF */ + unsigned int load_cap_idx; + unsigned int status; + int err; + + err = regmap_update_bits(regmap, MAX77714_32K_CONFIG, + MAX77714_32K_CONFIG_XOSC_RETRY, + MAX77714_32K_CONFIG_XOSC_RETRY); + if (err) + return dev_err_probe(dev, err, "Failed to configure the external oscillator\n"); + + err = regmap_read(regmap, MAX77714_32K_STATUS, &status); + if (err) + return dev_err_probe(dev, err, "Failed to read external oscillator status\n"); + + load_cap_idx = (status >> MAX77714_32K_STATUS_32KLOAD_SHF) + & MAX77714_32K_STATUS_32KLOAD_MSK; + + dev_info(dev, "Using %s oscillator, %d pF load cap\n", + status & MAX77714_32K_STATUS_32KSOURCE ? "internal" : "external", + load_cap[load_cap_idx]); + + return 0; +} + +static int max77714_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + int err; + + regmap = devm_regmap_init_i2c(client, &max77714_regmap_config); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "Failed to initialise regmap\n"); + + err = max77714_setup_xosc(dev, regmap); + if (err) + return err; + + err = devm_regmap_add_irq_chip(dev, regmap, client->irq, + IRQF_ONESHOT | IRQF_SHARED, 0, + &max77714_irq_chip, &irq_data); + if (err) + return dev_err_probe(dev, err, "Failed to add PMIC IRQ chip\n"); + + err = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + max77714_cells, ARRAY_SIZE(max77714_cells), + NULL, 0, NULL); + if (err) + return dev_err_probe(dev, err, "Failed to register child devices\n"); + + return 0; +} + +static const struct of_device_id max77714_dt_match[] = { + { .compatible = "maxim,max77714" }, + {}, +}; +MODULE_DEVICE_TABLE(of, max77714_dt_match); + +static struct i2c_driver max77714_driver = { + .driver = { + .name = "max77714", + .of_match_table = max77714_dt_match, + }, + .probe_new = max77714_probe, +}; +module_i2c_driver(max77714_driver); + +MODULE_DESCRIPTION("Maxim MAX77714 MFD core driver"); +MODULE_AUTHOR("Luca Ceresoli "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/mfd/max77714.h b/include/linux/mfd/max77714.h new file mode 100644 index 000000000000..a970dc455426 --- /dev/null +++ b/include/linux/mfd/max77714.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Maxim MAX77714 Register and data structures definition. 
+ * + * Copyright (C) 2022 Luca Ceresoli + * Author: Luca Ceresoli + */ + +#ifndef __LINUX_MFD_MAX77714_H_ +#define __LINUX_MFD_MAX77714_H_ + +#include + +#define MAX77714_INT_TOP 0x00 +#define MAX77714_INT_TOPM 0x07 /* Datasheet says "read only", but it is RW */ + +#define MAX77714_INT_TOP_ONOFF BIT(1) +#define MAX77714_INT_TOP_RTC BIT(3) +#define MAX77714_INT_TOP_GPIO BIT(4) +#define MAX77714_INT_TOP_LDO BIT(5) +#define MAX77714_INT_TOP_SD BIT(6) +#define MAX77714_INT_TOP_GLBL BIT(7) + +#define MAX77714_32K_STATUS 0x30 +#define MAX77714_32K_STATUS_SIOSCOK BIT(5) +#define MAX77714_32K_STATUS_XOSCOK BIT(4) +#define MAX77714_32K_STATUS_32KSOURCE BIT(3) +#define MAX77714_32K_STATUS_32KLOAD_MSK 0x3 +#define MAX77714_32K_STATUS_32KLOAD_SHF 1 +#define MAX77714_32K_STATUS_CRYSTAL_CFG BIT(0) + +#define MAX77714_32K_CONFIG 0x31 +#define MAX77714_32K_CONFIG_XOSC_RETRY BIT(4) + +#define MAX77714_CNFG_GLBL2 0x91 +#define MAX77714_WDTEN BIT(2) +#define MAX77714_WDTSLPC BIT(3) +#define MAX77714_TWD_MASK 0x3 +#define MAX77714_TWD_2s 0x0 +#define MAX77714_TWD_16s 0x1 +#define MAX77714_TWD_64s 0x2 +#define MAX77714_TWD_128s 0x3 + +#define MAX77714_CNFG_GLBL3 0x92 +#define MAX77714_WDTC BIT(0) + +#define MAX77714_CNFG2_ONOFF 0x94 +#define MAX77714_WD_RST_WK BIT(5) + +/* Interrupts */ +enum { + MAX77714_IRQ_TOP_ONOFF, + MAX77714_IRQ_TOP_RTC, /* Real-time clock */ + MAX77714_IRQ_TOP_GPIO, /* GPIOs */ + MAX77714_IRQ_TOP_LDO, /* Low-dropout regulators */ + MAX77714_IRQ_TOP_SD, /* Step-down regulators */ + MAX77714_IRQ_TOP_GLBL, /* "Global resources": Low-Battery, overtemp... */ +}; + +#endif /* __LINUX_MFD_MAX77714_H_ */ -- cgit v1.2.3-71-gd317 From c47383f849097c2b3547e28365578cd9e5811378 Mon Sep 17 00:00:00 2001 From: Johnson Wang Date: Thu, 6 Jan 2022 14:54:04 +0800 Subject: mfd: Add support for the MediaTek MT6366 PMIC This adds support for the MediaTek MT6366 PMIC. This is a multifunction device with the following submodules: - Regulator - RTC - Codec - Interrupt It is interfaced to the host controller over SPI through a proprietary hardware block called the PMIC wrapper, or pwrap. The MT6366 MFD is a child device of the pwrap.
Signed-off-by: Johnson Wang Signed-off-by: Lee Jones Link: https://lore.kernel.org/r/20220106065407.16036-2-johnson.wang@mediatek.com --- drivers/mfd/mt6358-irq.c | 1 + include/linux/mfd/mt6358/registers.h | 7 +++++++ include/linux/mfd/mt6397/core.h | 1 + 3 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c index 83f3ffbdbb4c..ea5e452510eb 100644 --- a/drivers/mfd/mt6358-irq.c +++ b/drivers/mfd/mt6358-irq.c @@ -212,6 +212,7 @@ int mt6358_irq_init(struct mt6397_chip *chip) switch (chip->chip_id) { case MT6358_CHIP_ID: + case MT6366_CHIP_ID: chip->irq_data = &mt6358_irqd; break; diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h index 201139b12140..3d33517f178c 100644 --- a/include/linux/mfd/mt6358/registers.h +++ b/include/linux/mfd/mt6358/registers.h @@ -94,6 +94,10 @@ #define MT6358_BUCK_VCORE_CON0 0x1488 #define MT6358_BUCK_VCORE_DBG0 0x149e #define MT6358_BUCK_VCORE_DBG1 0x14a0 +#define MT6358_BUCK_VCORE_SSHUB_CON0 0x14a4 +#define MT6358_BUCK_VCORE_SSHUB_CON1 0x14a6 +#define MT6358_BUCK_VCORE_SSHUB_ELR0 MT6358_BUCK_VCORE_SSHUB_CON1 +#define MT6358_BUCK_VCORE_SSHUB_DBG1 MT6358_BUCK_VCORE_DBG1 #define MT6358_BUCK_VCORE_ELR0 0x14aa #define MT6358_BUCK_VGPU_CON0 0x1508 #define MT6358_BUCK_VGPU_DBG0 0x151e @@ -169,6 +173,9 @@ #define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6 #define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0 #define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2 +#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON0 0x1bc4 +#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1 0x1bc6 +#define MT6358_LDO_VSRAM_OTHERS_SSHUB_DBG1 MT6358_LDO_VSRAM_OTHERS_DBG1 #define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8 #define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2 #define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4 diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 56f210eebc54..1cf78726503b 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -14,6 +14,7 @@ enum chip_id { MT6323_CHIP_ID = 0x23, MT6358_CHIP_ID = 0x58, MT6359_CHIP_ID = 0x59, + MT6366_CHIP_ID = 0x66, MT6391_CHIP_ID = 0x91, MT6397_CHIP_ID = 0x97, }; -- cgit v1.2.3-71-gd317 From aa6f8dcbab473f3a3c7454b74caa46d36cdc5d13 Mon Sep 17 00:00:00 2001 From: Halil Pasic Date: Sat, 5 Mar 2022 18:07:14 +0100 Subject: swiotlb: rework "fix info leak with DMA_FROM_DEVICE" Unfortunately, we ended up merging an old version of the patch "fix info leak with DMA_FROM_DEVICE" instead of merging the latest one. Christoph (the swiotlb maintainer) asked me to create an incremental fix after I pointed out the mix-up and asked him for guidance. So here we go. The main differences between what we got and what was agreed are: * swiotlb_sync_single_for_device() is also required to do an extra bounce * We decided not to introduce DMA_ATTR_OVERWRITE until we have exploiters * The implementation of DMA_ATTR_OVERWRITE is flawed: DMA_ATTR_OVERWRITE must take precedence over DMA_ATTR_SKIP_CPU_SYNC Thus this patch removes DMA_ATTR_OVERWRITE, and makes swiotlb_sync_single_for_device() bounce unconditionally (that is, also when dir == DMA_TO_DEVICE) in order to avoid synchronising back stale data from the swiotlb buffer. Let me note that if the size used with the dma_sync_* API is less than the size used with dma_[un]map_*, under certain circumstances we may still end up with swiotlb not being transparent. In that sense, this is not a perfect fix either. To make this bulletproof, we would have to bounce the entire mapping/bounce buffer.
For that we would have to figure out the starting address, and the size of the mapping in swiotlb_sync_single_for_device(). While this does seem possible, there seems to be no firm consensus on how things are supposed to work. Signed-off-by: Halil Pasic Fixes: ddbd89deb7d3 ("swiotlb: fix info leak with DMA_FROM_DEVICE") Cc: stable@vger.kernel.org Reviewed-by: Christoph Hellwig Signed-off-by: Linus Torvalds --- Documentation/core-api/dma-attributes.rst | 8 -------- include/linux/dma-mapping.h | 8 -------- kernel/dma/swiotlb.c | 23 +++++++++++++++-------- 3 files changed, 15 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/Documentation/core-api/dma-attributes.rst b/Documentation/core-api/dma-attributes.rst index 17706dc91ec9..1887d92e8e92 100644 --- a/Documentation/core-api/dma-attributes.rst +++ b/Documentation/core-api/dma-attributes.rst @@ -130,11 +130,3 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged subsystem that the buffer is fully accessible at the elevated privilege level (and ideally inaccessible or at least read-only at the lesser-privileged levels). - -DMA_ATTR_OVERWRITE ------------------- - -This is a hint to the DMA-mapping subsystem that the device is expected to -overwrite the entire mapped size, thus the caller does not require any of the -previous buffer contents to be preserved. This allows bounce-buffering -implementations to optimise DMA_FROM_DEVICE transfers. diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 6150d11a607e..dca2b1355bb1 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -61,14 +61,6 @@ */ #define DMA_ATTR_PRIVILEGED (1UL << 9) -/* - * This is a hint to the DMA-mapping subsystem that the device is expected - * to overwrite the entire mapped size, thus the caller does not require any - * of the previous buffer contents to be preserved. This allows - * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers. - */ -#define DMA_ATTR_OVERWRITE (1UL << 10) - /* * A dma_addr_t can hold any valid DMA or bus address for the platform. It can * be given to a device to use as a DMA source or target. It is specific to a diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index bfc56cb21705..6db1c475ec82 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -627,10 +627,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, for (i = 0; i < nr_slots(alloc_size + offset); i++) mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); tlb_addr = slot_addr(mem->start, index) + offset; - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE || - dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); + /* + * When dir == DMA_FROM_DEVICE we could omit the copy from the orig + * to the tlb buffer, if we knew for sure the device will + * overwirte the entire current content. But we don't. Thus + * unconditional bounce may prevent leaking swiotlb content (i.e. + * kernel memory) to user-space. 
+ */ + swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); return tlb_addr; } @@ -697,10 +701,13 @@ void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr, void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, size_t size, enum dma_data_direction dir) { - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) - swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); - else - BUG_ON(dir != DMA_FROM_DEVICE); + /* + * Unconditional bounce is necessary to avoid corruption on + * sync_*_for_cpu or dma_ummap_* when the device didn't overwrite + * the whole lengt of the bounce buffer. + */ + swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); + BUG_ON(!valid_dma_direction(dir)); } void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, -- cgit v1.2.3-71-gd317 From c75e707fe1aab32f1dc8e09845533b6542d9aaa9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 4 Mar 2022 18:55:56 +0100 Subject: block: remove the per-bio/request write hint With the NVMe support for this gone, there are no consumers of these hints left, so remove them. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220304175556.407719-2-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 2 -- block/blk-crypto-fallback.c | 1 - block/blk-merge.c | 14 -------------- block/blk-mq-debugfs.c | 24 ------------------------ block/blk-mq.c | 1 - block/bounce.c | 1 - block/fops.c | 3 --- drivers/md/raid1.c | 2 -- drivers/md/raid5-ppl.c | 28 +++------------------------- drivers/md/raid5.c | 6 ------ fs/btrfs/extent_io.c | 1 - fs/buffer.c | 13 +++++-------- fs/direct-io.c | 3 --- fs/ext4/page-io.c | 5 +---- fs/f2fs/data.c | 2 -- fs/gfs2/lops.c | 1 - fs/iomap/buffered-io.c | 2 -- fs/iomap/direct-io.c | 1 - fs/mpage.c | 1 - fs/zonefs/super.c | 1 - include/linux/blk_types.h | 1 - include/linux/blkdev.h | 3 --- 22 files changed, 9 insertions(+), 107 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index 151cace2dbe1..3c57b3ba727d 100644 --- a/block/bio.c +++ b/block/bio.c @@ -257,7 +257,6 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, bio->bi_opf = opf; bio->bi_flags = 0; bio->bi_ioprio = 0; - bio->bi_write_hint = 0; bio->bi_status = 0; bio->bi_iter.bi_sector = 0; bio->bi_iter.bi_size = 0; @@ -737,7 +736,6 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) bio_flagged(bio_src, BIO_REMAPPED)) bio_set_flag(bio, BIO_REMAPPED); bio->bi_ioprio = bio_src->bi_ioprio; - bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; bio_clone_blkg_association(bio, bio_src); diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index 18c8eafe20b9..7c854584b52b 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -170,7 +170,6 @@ static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src) bio_set_flag(bio, BIO_REMAPPED); bio->bi_opf = bio_src->bi_opf; bio->bi_ioprio = bio_src->bi_ioprio; - bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; diff --git a/block/blk-merge.c b/block/blk-merge.c index f5255991b773..0e871d4e7cb8 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -782,13 +782,6 @@ static struct request *attempt_merge(struct request_queue *q, !blk_write_same_mergeable(req->bio, next->bio)) return NULL; - /* - * Don't allow merge of different write hints, or for a hint with - * non-hint IO. 
- */ - if (req->write_hint != next->write_hint) - return NULL; - if (req->ioprio != next->ioprio) return NULL; @@ -915,13 +908,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) !blk_write_same_mergeable(rq->bio, bio)) return false; - /* - * Don't allow merge of different write hints, or for a hint with - * non-hint IO. - */ - if (rq->write_hint != bio->bi_write_hint) - return false; - if (rq->ioprio != bio_prio(bio)) return false; diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 3a790eb4995c..c2904c75c160 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -183,35 +183,11 @@ inval: return count; } -static int queue_write_hint_show(void *data, struct seq_file *m) -{ - struct request_queue *q = data; - int i; - - for (i = 0; i < BLK_MAX_WRITE_HINTS; i++) - seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]); - - return 0; -} - -static ssize_t queue_write_hint_store(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct request_queue *q = data; - int i; - - for (i = 0; i < BLK_MAX_WRITE_HINTS; i++) - q->write_hints[i] = 0; - - return count; -} - static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { { "poll_stat", 0400, queue_poll_stat_show }, { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops }, { "pm_only", 0600, queue_pm_only_show, NULL }, { "state", 0600, queue_state_show, queue_state_write }, - { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store }, { "zone_wlock", 0400, queue_zone_wlock_show, NULL }, { }, }; diff --git a/block/blk-mq.c b/block/blk-mq.c index f1b067d06ab5..64d5c2edb817 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2406,7 +2406,6 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, rq->cmd_flags |= REQ_FAILFAST_MASK; rq->__sector = bio->bi_iter.bi_sector; - rq->write_hint = bio->bi_write_hint; blk_rq_bio_prep(rq, bio, nr_segs); /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. 
*/ diff --git a/block/bounce.c b/block/bounce.c index 3d50d19cde72..9db1256d57d5 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -169,7 +169,6 @@ static struct bio *bounce_clone_bio(struct bio *bio_src) if (bio_flagged(bio_src, BIO_REMAPPED)) bio_set_flag(bio, BIO_REMAPPED); bio->bi_ioprio = bio_src->bi_ioprio; - bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; diff --git a/block/fops.c b/block/fops.c index 7ccc4ff109ce..1c732b72de72 100644 --- a/block/fops.c +++ b/block/fops.c @@ -83,7 +83,6 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb)); } bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT; - bio.bi_write_hint = iocb->ki_hint; bio.bi_private = current; bio.bi_end_io = blkdev_bio_end_io_simple; bio.bi_ioprio = iocb->ki_ioprio; @@ -225,7 +224,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, for (;;) { bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; - bio->bi_write_hint = iocb->ki_hint; bio->bi_private = dio; bio->bi_end_io = blkdev_bio_end_io; bio->bi_ioprio = iocb->ki_ioprio; @@ -327,7 +325,6 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb, dio->flags = 0; dio->iocb = iocb; bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; - bio->bi_write_hint = iocb->ki_hint; bio->bi_end_io = blkdev_bio_end_io_async; bio->bi_ioprio = iocb->ki_ioprio; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 03477e971699..370ab3218534 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1137,8 +1137,6 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, goto skip_copy; } - behind_bio->bi_write_hint = bio->bi_write_hint; - while (i < vcnt && size) { struct page *page; int len = min_t(int, PAGE_SIZE, size); diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index bbb5673104ec..388ab41d1a11 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -467,7 +467,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) bio_set_dev(bio, log->rdev->bdev); bio->bi_iter.bi_sector = log->next_io_sector; bio_add_page(bio, io->header_page, PAGE_SIZE, 0); - bio->bi_write_hint = ppl_conf->write_hint; pr_debug("%s: log->current_io_sector: %llu\n", __func__, (unsigned long long)log->next_io_sector); @@ -497,7 +496,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io) bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOIO, &ppl_conf->bs); - bio->bi_write_hint = prev->bi_write_hint; bio->bi_iter.bi_sector = bio_end_sector(prev); bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); @@ -1397,7 +1395,6 @@ int ppl_init_log(struct r5conf *conf) atomic64_set(&ppl_conf->seq, 0); INIT_LIST_HEAD(&ppl_conf->no_mem_stripes); spin_lock_init(&ppl_conf->no_mem_stripes_lock); - ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET; if (!mddev->external) { ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid)); @@ -1496,25 +1493,13 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add) static ssize_t ppl_write_hint_show(struct mddev *mddev, char *buf) { - size_t ret = 0; - struct r5conf *conf; - struct ppl_conf *ppl_conf = NULL; - - spin_lock(&mddev->lock); - conf = mddev->private; - if (conf && raid5_has_ppl(conf)) - ppl_conf = conf->log_private; - ret = sprintf(buf, "%d\n", ppl_conf ? 
ppl_conf->write_hint : 0); - spin_unlock(&mddev->lock); - - return ret; + return sprintf(buf, "%d\n", 0); } static ssize_t ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len) { struct r5conf *conf; - struct ppl_conf *ppl_conf; int err = 0; unsigned short new; @@ -1528,17 +1513,10 @@ ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len) return err; conf = mddev->private; - if (!conf) { + if (!conf) err = -ENODEV; - } else if (raid5_has_ppl(conf)) { - ppl_conf = conf->log_private; - if (!ppl_conf) - err = -EINVAL; - else - ppl_conf->write_hint = new; - } else { + else if (!raid5_has_ppl(conf) || !conf->log_private) err = -EINVAL; - } mddev_unlock(mddev); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8891aaba6596..78503db55ca4 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1210,9 +1210,6 @@ again: bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); bi->bi_io_vec[0].bv_offset = sh->dev[i].offset; bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); - bi->bi_write_hint = sh->dev[i].write_hint; - if (!rrdev) - sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET; /* * If this is discard request, set bi_vcnt 0. We don't * want to confuse SCSI because SCSI will replace payload @@ -1264,8 +1261,6 @@ again: rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset; rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); - rbi->bi_write_hint = sh->dev[i].write_hint; - sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET; /* * If this is discard request, set bi_vcnt 0. We don't * want to confuse SCSI because SCSI will replace payload @@ -3416,7 +3411,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, (unsigned long long)sh->sector); spin_lock_irq(&sh->stripe_lock); - sh->dev[dd_idx].write_hint = bi->bi_write_hint; /* Don't allow new IO added to stripes in batch list */ if (sh->batch_head) goto overlap; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5923eec8caa8..3b386bbb85a7 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3321,7 +3321,6 @@ static int alloc_new_bio(struct btrfs_inode *inode, bio_ctrl->bio_flags = bio_flags; bio->bi_end_io = end_io_func; bio->bi_private = &inode->io_tree; - bio->bi_write_hint = inode->vfs_inode.i_write_hint; bio->bi_opf = opf; ret = calc_bio_boundaries(bio_ctrl, inode, file_offset); if (ret < 0) diff --git a/fs/buffer.c b/fs/buffer.c index a17c386a142c..29c6c60660f6 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -53,7 +53,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, - enum rw_hint hint, struct writeback_control *wbc); + struct writeback_control *wbc); #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers) @@ -1806,8 +1806,7 @@ int __block_write_full_page(struct inode *inode, struct page *page, do { struct buffer_head *next = bh->b_this_page; if (buffer_async_write(bh)) { - submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, - inode->i_write_hint, wbc); + submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc); nr_underway++; } bh = next; @@ -1861,8 +1860,7 @@ recover: struct buffer_head *next = bh->b_this_page; if (buffer_async_write(bh)) { clear_buffer_dirty(bh); - submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, - inode->i_write_hint, wbc); + submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc); nr_underway++; } bh = next; @@ -3008,7 +3006,7 @@ static void end_bio_bh_io_sync(struct bio *bio) } static int 
submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, - enum rw_hint write_hint, struct writeback_control *wbc) + struct writeback_control *wbc) { struct bio *bio; @@ -3034,7 +3032,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); - bio->bi_write_hint = write_hint; bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); BUG_ON(bio->bi_iter.bi_size != bh->b_size); @@ -3056,7 +3053,7 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, int submit_bh(int op, int op_flags, struct buffer_head *bh) { - return submit_bh_wbc(op, op_flags, bh, 0, NULL); + return submit_bh_wbc(op, op_flags, bh, NULL); } EXPORT_SYMBOL(submit_bh); diff --git a/fs/direct-io.c b/fs/direct-io.c index 38bca4980a1c..aef06e607b40 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -402,9 +402,6 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, bio->bi_end_io = dio_bio_end_aio; else bio->bi_end_io = dio_bio_end_io; - - bio->bi_write_hint = dio->iocb->ki_hint; - sdio->bio = bio; sdio->logical_offset_in_bio = sdio->cur_page_fs_offset; } diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 17bb78ebd784..495ce59fb4ad 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -373,7 +373,6 @@ void ext4_io_submit(struct ext4_io_submit *io) if (bio) { if (io->io_wbc->sync_mode == WB_SYNC_ALL) io->io_bio->bi_opf |= REQ_SYNC; - io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint; submit_bio(io->io_bio); } io->io_bio = NULL; @@ -418,10 +417,8 @@ static void io_submit_add_bh(struct ext4_io_submit *io, submit_and_retry: ext4_io_submit(io); } - if (io->io_bio == NULL) { + if (io->io_bio == NULL) io_submit_init_bio(io, bh); - io->io_bio->bi_write_hint = inode->i_write_hint; - } ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh)); if (ret != bh->b_size) goto submit_and_retry; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index e71dde8de0db..20d65aa6243a 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -403,8 +403,6 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) } else { bio->bi_end_io = f2fs_write_end_io; bio->bi_private = sbi; - bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, - fio->type, fio->temp); } iostat_alloc_and_bind_ctx(sbi, bio, NULL); diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 4ae1eefae616..6ba51cbb94cf 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -491,7 +491,6 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs) new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO); bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); - new->bi_write_hint = prev->bi_write_hint; bio_chain(new, prev); submit_bio(prev); return new; diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 4653f3d07a1d..d0bf43f37a64 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1232,7 +1232,6 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, REQ_OP_WRITE | wbc_to_write_flags(wbc), GFP_NOFS, &iomap_ioend_bioset); bio->bi_iter.bi_sector = sector; - bio->bi_write_hint = inode->i_write_hint; wbc_init_bio(wbc, bio); ioend = container_of(bio, struct iomap_ioend, io_inline_bio); @@ -1263,7 +1262,6 @@ iomap_chain_bio(struct bio *prev) new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS); bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); - new->bi_write_hint = 
prev->bi_write_hint; bio_chain(prev, new); bio_get(prev); /* for iomap_finish_ioend */ diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index e2ba13645ef2..a434b1829545 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -309,7 +309,6 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter, bio = bio_alloc(iomap->bdev, nr_pages, bio_opf, GFP_KERNEL); bio->bi_iter.bi_sector = iomap_sector(iomap, pos); - bio->bi_write_hint = dio->iocb->ki_hint; bio->bi_ioprio = dio->iocb->ki_ioprio; bio->bi_private = dio; bio->bi_end_io = iomap_dio_bio_end_io; diff --git a/fs/mpage.c b/fs/mpage.c index 6c4b810a21d0..c6379a81f90f 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -588,7 +588,6 @@ alloc_new: GFP_NOFS); bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); wbc_init_bio(wbc, bio); - bio->bi_write_hint = inode->i_write_hint; } /* diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index d331b52592a0..b71a23dd1255 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -695,7 +695,6 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from) bio = bio_alloc(bdev, nr_pages, REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS); bio->bi_iter.bi_sector = zi->i_zsector; - bio->bi_write_hint = iocb->ki_hint; bio->bi_ioprio = iocb->ki_ioprio; if (iocb->ki_flags & IOCB_DSYNC) bio->bi_opf |= REQ_FUA; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 5561e58d158a..ba8cfa57255f 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -250,7 +250,6 @@ struct bio { */ unsigned short bi_flags; /* BIO_* below */ unsigned short bi_ioprio; - unsigned short bi_write_hint; blk_status_t bi_status; atomic_t __bi_remaining; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index a12c031af887..226836ccd060 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -518,9 +518,6 @@ struct request_queue { bool mq_sysfs_init_done; -#define BLK_MAX_WRITE_HINTS 5 - u64 write_hints[BLK_MAX_WRITE_HINTS]; - /* * Independent sector access ranges. This is always NULL for * devices that do not have multiple independent access ranges. -- cgit v1.2.3-71-gd317 From c340b990d58c856c1636e0c10abb9e4351ad852a Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 3 Mar 2022 12:13:05 -0800 Subject: block: support pi with extended metadata The nvme spec allows protection information formats with metadata extending beyond the pi field. Use the actual size of the metadata field for incrementing the buffer. Reviewed-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Reviewed-by: Martin K. 
Petersen Signed-off-by: Keith Busch Link: https://lore.kernel.org/r/20220303201312.3255347-2-kbusch@kernel.org Signed-off-by: Jens Axboe --- block/bio-integrity.c | 1 + block/t10-pi.c | 4 ++-- include/linux/blk-integrity.h | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 6996e7bd66e9..32929c89ba8a 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -165,6 +165,7 @@ static blk_status_t bio_integrity_process(struct bio *bio, iter.disk_name = bio->bi_bdev->bd_disk->disk_name; iter.interval = 1 << bi->interval_exp; + iter.tuple_size = bi->tuple_size; iter.seed = proc_iter->bi_sector; iter.prot_buf = bvec_virt(bip->bip_vec); diff --git a/block/t10-pi.c b/block/t10-pi.c index 25a52a2a09a8..758a76518854 100644 --- a/block/t10-pi.c +++ b/block/t10-pi.c @@ -44,7 +44,7 @@ static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter, pi->ref_tag = 0; iter->data_buf += iter->interval; - iter->prot_buf += sizeof(struct t10_pi_tuple); + iter->prot_buf += iter->tuple_size; iter->seed++; } @@ -93,7 +93,7 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, next: iter->data_buf += iter->interval; - iter->prot_buf += sizeof(struct t10_pi_tuple); + iter->prot_buf += iter->tuple_size; iter->seed++; } diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index 8a038ea0717e..378b2459efe2 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -19,6 +19,7 @@ struct blk_integrity_iter { sector_t seed; unsigned int data_size; unsigned short interval; + unsigned char tuple_size; const char *disk_name; }; -- cgit v1.2.3-71-gd317 From 7ee8809df990d1de379002973baee1681e8d7dd3 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 3 Mar 2022 12:13:08 -0800 Subject: linux/kernel: introduce lower_48_bits function Recent data integrity field enhancements allow reference tags to be up to 48 bits. Introduce an inline helper function since this will be a repeated operation. Suggested-by: Bart Van Assche Signed-off-by: Keith Busch Reviewed-by: Bart Van Assche Link: https://lore.kernel.org/r/20220303201312.3255347-5-kbusch@kernel.org Signed-off-by: Jens Axboe --- include/linux/kernel.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 33f47a996513..3877478681b9 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -63,6 +63,15 @@ } \ ) +/** + * lower_48_bits() - return bits 0-47 of a number + * @n: the number we're accessing + */ +static inline u64 lower_48_bits(u64 n) +{ + return n & ((1ull << 48) - 1); +} + /** * upper_32_bits - return bits 32-63 of a number * @n: the number we're accessing -- cgit v1.2.3-71-gd317 From cbc0a40e17da361a2ada8d669413ccfbd2028f2d Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 3 Mar 2022 12:13:09 -0800 Subject: lib: add rocksoft model crc64 The NVM Express specification extended data integrity fields to 64 bits using the Rocksoft parameters. Add the poly to the crc64 table generation, and provide a generic library routine implementing the algorithm. The Rocksoft 64-bit CRC model parameters are as follows: Poly: 0xAD93D23594C93659 Initial value: 0xFFFFFFFFFFFFFFFF Reflected Input: True Reflected Output: True Xor Final: 0xFFFFFFFFFFFFFFFF Since this model used reflected bits, the implementation generates the reflected table so the result is ordered consistently. 
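[Illustration, not part of the patch: the parameters above reduce to a table-free bitwise implementation. The constant 0x9A6C9329AC4BC9B5 used below is the bit-reversed form of the specification polynomial 0xAD93D23594C93659 and matches CRC64_ROCKSOFT_POLY in lib/gen_crc64table.c; the helper name crc64_rocksoft_bitwise is hypothetical.]

#include <linux/types.h>

/* Bitwise sketch of the Rocksoft model: seed and final xor are all
 * ones, and input/output reflection falls out of shifting right with
 * the bit-reversed polynomial.
 */
static u64 crc64_rocksoft_bitwise(u64 crc, const void *p, size_t len)
{
	const unsigned char *_p = p;
	size_t i;
	int j;

	crc = ~crc;
	for (i = 0; i < len; i++) {
		crc ^= _p[i];
		for (j = 0; j < 8; j++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x9A6C9329AC4BC9B5ULL
					: crc >> 1;
	}
	return ~crc;
}

Each entry of the generated lookup table folds eight of these bit steps, so this sketch and the table-driven crc64_rocksoft_generic() below agree for any input.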
Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Martin K. Petersen Signed-off-by: Keith Busch Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20220303201312.3255347-6-kbusch@kernel.org Signed-off-by: Jens Axboe --- include/linux/crc64.h | 2 ++ lib/crc64.c | 28 ++++++++++++++++++++++++++++ lib/gen_crc64table.c | 51 ++++++++++++++++++++++++++++++++++++++++----------- 3 files changed, 70 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/crc64.h b/include/linux/crc64.h index c756e65a1b58..9480f38cc7cf 100644 --- a/include/linux/crc64.h +++ b/include/linux/crc64.h @@ -8,4 +8,6 @@ #include u64 __pure crc64_be(u64 crc, const void *p, size_t len); +u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len); + #endif /* _LINUX_CRC64_H */ diff --git a/lib/crc64.c b/lib/crc64.c index 9f852a89ee2a..61ae8dfb6a1c 100644 --- a/lib/crc64.c +++ b/lib/crc64.c @@ -22,6 +22,13 @@ * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 + * x^7 + x^4 + x + 1 * + * crc64rocksoft[256] table is from the Rocksoft specification polynomial + * defined as, + * + * x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 + x^47 + + * x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 + x^26 + x^23 + + * x^22 + x^19 + x^16 + x^13 + x^12 + x^10 + x^9 + x^6 + x^4 + x^3 + 1 + * * Copyright 2018 SUSE Linux. * Author: Coly Li */ @@ -55,3 +62,24 @@ u64 __pure crc64_be(u64 crc, const void *p, size_t len) return crc; } EXPORT_SYMBOL_GPL(crc64_be); + +/** + * crc64_rocksoft_generic - Calculate bitwise Rocksoft CRC64 + * @crc: seed value for computation. 0 for a new CRC calculation, or the + * previous crc64 value if computing incrementally. + * @p: pointer to buffer over which CRC64 is run + * @len: length of buffer @p + */ +u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len) +{ + const unsigned char *_p = p; + size_t i; + + crc = ~crc; + + for (i = 0; i < len; i++) + crc = (crc >> 8) ^ crc64rocksofttable[(crc & 0xff) ^ *_p++]; + + return ~crc; +} +EXPORT_SYMBOL_GPL(crc64_rocksoft_generic); diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c index 094b43aef8db..55e222acd0b8 100644 --- a/lib/gen_crc64table.c +++ b/lib/gen_crc64table.c @@ -17,10 +17,30 @@ #include #define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL +#define CRC64_ROCKSOFT_POLY 0x9A6C9329AC4BC9B5ULL static uint64_t crc64_table[256] = {0}; +static uint64_t crc64_rocksoft_table[256] = {0}; -static void generate_crc64_table(void) +static void generate_reflected_crc64_table(uint64_t table[256], uint64_t poly) +{ + uint64_t i, j, c, crc; + + for (i = 0; i < 256; i++) { + crc = 0ULL; + c = i; + + for (j = 0; j < 8; j++) { + if ((crc ^ (c >> j)) & 1) + crc = (crc >> 1) ^ poly; + else + crc >>= 1; + } + table[i] = crc; + } +} + +static void generate_crc64_table(uint64_t table[256], uint64_t poly) { uint64_t i, j, c, crc; @@ -30,26 +50,22 @@ static void generate_crc64_table(void) for (j = 0; j < 8; j++) { if ((crc ^ c) & 0x8000000000000000ULL) - crc = (crc << 1) ^ CRC64_ECMA182_POLY; + crc = (crc << 1) ^ poly; else crc <<= 1; c <<= 1; } - crc64_table[i] = crc; + table[i] = crc; } } -static void print_crc64_table(void) +static void output_table(uint64_t table[256]) { int i; - printf("/* this file is generated - do not edit */\n\n"); - printf("#include \n"); - printf("#include \n\n"); - printf("static const u64 ____cacheline_aligned crc64table[256] = {\n"); for (i = 0; i < 256; i++) { - printf("\t0x%016" PRIx64 "ULL", crc64_table[i]); + printf("\t0x%016" PRIx64 
"ULL", table[i]); if (i & 0x1) printf(",\n"); else @@ -58,9 +74,22 @@ static void print_crc64_table(void) printf("};\n"); } +static void print_crc64_tables(void) +{ + printf("/* this file is generated - do not edit */\n\n"); + printf("#include \n"); + printf("#include \n\n"); + printf("static const u64 ____cacheline_aligned crc64table[256] = {\n"); + output_table(crc64_table); + + printf("\nstatic const u64 ____cacheline_aligned crc64rocksofttable[256] = {\n"); + output_table(crc64_rocksoft_table); +} + int main(int argc, char *argv[]) { - generate_crc64_table(); - print_crc64_table(); + generate_crc64_table(crc64_table, CRC64_ECMA182_POLY); + generate_reflected_crc64_table(crc64_rocksoft_table, CRC64_ROCKSOFT_POLY); + print_crc64_tables(); return 0; } -- cgit v1.2.3-71-gd317 From f3813f4b287e480b1fcd62ca798d8556644b8278 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 3 Mar 2022 12:13:10 -0800 Subject: crypto: add rocksoft 64b crc guard tag framework Hardware specific features may be able to calculate a crc64, so provide a framework for drivers to register their implementation. If nothing is registered, fallback to the generic table lookup implementation. The implementation is modeled after the crct10dif equivalent. Signed-off-by: Keith Busch Link: https://lore.kernel.org/r/20220303201312.3255347-7-kbusch@kernel.org Signed-off-by: Jens Axboe --- crypto/Kconfig | 5 ++ crypto/Makefile | 1 + crypto/crc64_rocksoft_generic.c | 89 ++++++++++++++++++++++++++++ crypto/testmgr.c | 7 +++ crypto/testmgr.h | 15 +++++ include/linux/crc64.h | 5 ++ lib/Kconfig | 9 +++ lib/Makefile | 1 + lib/crc64-rocksoft.c | 126 ++++++++++++++++++++++++++++++++++++++++ 9 files changed, 258 insertions(+) create mode 100644 crypto/crc64_rocksoft_generic.c create mode 100644 lib/crc64-rocksoft.c (limited to 'include/linux') diff --git a/crypto/Kconfig b/crypto/Kconfig index 442765219c37..e88e2d00e33d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -735,6 +735,11 @@ config CRYPTO_CRCT10DIF_VPMSUM multiply-sum (vpmsum) instructions, introduced in POWER8. Enable on POWER8 and newer processors for improved performance. 
+config CRYPTO_CRC64_ROCKSOFT + tristate "Rocksoft Model CRC64 algorithm" + depends on CRC64 + select CRYPTO_HASH + config CRYPTO_VPMSUM_TESTER tristate "Powerpc64 vpmsum hardware acceleration tester" depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM diff --git a/crypto/Makefile b/crypto/Makefile index d76bff8d0ffd..f754c4d17d6b 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -152,6 +152,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o +obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o obj-$(CONFIG_CRYPTO_LZ4) += lz4.o diff --git a/crypto/crc64_rocksoft_generic.c b/crypto/crc64_rocksoft_generic.c new file mode 100644 index 000000000000..9e812bb26dba --- /dev/null +++ b/crypto/crc64_rocksoft_generic.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include + +static int chksum_init(struct shash_desc *desc) +{ + u64 *crc = shash_desc_ctx(desc); + + *crc = 0; + + return 0; +} + +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + u64 *crc = shash_desc_ctx(desc); + + *crc = crc64_rocksoft_generic(*crc, data, length); + + return 0; +} + +static int chksum_final(struct shash_desc *desc, u8 *out) +{ + u64 *crc = shash_desc_ctx(desc); + + put_unaligned_le64(*crc, out); + return 0; +} + +static int __chksum_finup(u64 crc, const u8 *data, unsigned int len, u8 *out) +{ + crc = crc64_rocksoft_generic(crc, data, len); + put_unaligned_le64(crc, out); + return 0; +} + +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + u64 *crc = shash_desc_ctx(desc); + + return __chksum_finup(*crc, data, len, out); +} + +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + return __chksum_finup(0, data, length, out); +} + +static struct shash_alg alg = { + .digestsize = sizeof(u64), + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(u64), + .base = { + .cra_name = CRC64_ROCKSOFT_STRING, + .cra_driver_name = "crc64-rocksoft-generic", + .cra_priority = 200, + .cra_blocksize = 1, + .cra_module = THIS_MODULE, + } +}; + +static int __init crc64_rocksoft_init(void) +{ + return crypto_register_shash(&alg); +} + +static void __exit crc64_rocksoft_exit(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crc64_rocksoft_init); +module_exit(crc64_rocksoft_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Rocksoft model CRC64 calculation."); +MODULE_ALIAS_CRYPTO("crc64-rocksoft"); +MODULE_ALIAS_CRYPTO("crc64-rocksoft-generic"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 5831d4bbc64f..2e120eea10b1 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4526,6 +4526,13 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(crc32c_tv_template) } + }, { + .alg = "crc64-rocksoft", + .test = alg_test_hash, + .fips_allowed = 1, + .suite = { + .hash = __VECS(crc64_rocksoft_tv_template) + } }, { .alg = "crct10dif", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index a253d66ba1c1..f1a22794c404 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -3679,6 +3679,21 @@ static const struct 
hash_testvec rmd160_tv_template[] = { } }; +static const u8 zeroes[4096] = { [0 ... 4095] = 0 }; +static const u8 ones[4096] = { [0 ... 4095] = 0xff }; + +static const struct hash_testvec crc64_rocksoft_tv_template[] = { + { + .plaintext = zeroes, + .psize = 4096, + .digest = (u8 *)(u64[]){ 0x6482d367eb22b64eull }, + }, { + .plaintext = ones, + .psize = 4096, + .digest = (u8 *)(u64[]){ 0xc0ddba7302eca3acull }, + } +}; + static const struct hash_testvec crct10dif_tv_template[] = { { .plaintext = "abc", diff --git a/include/linux/crc64.h b/include/linux/crc64.h index 9480f38cc7cf..e044c60d1e61 100644 --- a/include/linux/crc64.h +++ b/include/linux/crc64.h @@ -7,7 +7,12 @@ #include +#define CRC64_ROCKSOFT_STRING "crc64-rocksoft" + u64 __pure crc64_be(u64 crc, const void *p, size_t len); u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len); +u64 crc64_rocksoft(const unsigned char *buffer, size_t len); +u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len); + #endif /* _LINUX_CRC64_H */ diff --git a/lib/Kconfig b/lib/Kconfig index 9b5a692ce00c..087e06b4cdfd 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -145,6 +145,15 @@ config CRC_T10DIF kernel tree needs to calculate CRC checks for use with the SCSI data integrity subsystem. +config CRC64_ROCKSOFT + tristate "CRC calculation for the Rocksoft model CRC64" + select CRC64 + select CRYPTO + select CRYPTO_CRC64_ROCKSOFT + help + This option provides a CRC64 API to a registered crypto driver. + This is used with the block layer's data integrity subsystem. + config CRC_ITU_T tristate "CRC ITU-T V.41 functions" help diff --git a/lib/Makefile b/lib/Makefile index 300f569c626b..7f7ae7458b6c 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -175,6 +175,7 @@ obj-$(CONFIG_CRC4) += crc4.o obj-$(CONFIG_CRC7) += crc7.o obj-$(CONFIG_LIBCRC32C) += libcrc32c.o obj-$(CONFIG_CRC8) += crc8.o +obj-$(CONFIG_CRC64_ROCKSOFT) += crc64-rocksoft.o obj-$(CONFIG_XXHASH) += xxhash.o obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o diff --git a/lib/crc64-rocksoft.c b/lib/crc64-rocksoft.c new file mode 100644 index 000000000000..fc9ae0da5df7 --- /dev/null +++ b/lib/crc64-rocksoft.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct crypto_shash __rcu *crc64_rocksoft_tfm; +static DEFINE_STATIC_KEY_TRUE(crc64_rocksoft_fallback); +static DEFINE_MUTEX(crc64_rocksoft_mutex); +static struct work_struct crc64_rocksoft_rehash_work; + +static int crc64_rocksoft_notify(struct notifier_block *self, unsigned long val, void *data) +{ + struct crypto_alg *alg = data; + + if (val != CRYPTO_MSG_ALG_LOADED || + strcmp(alg->cra_name, CRC64_ROCKSOFT_STRING)) + return NOTIFY_DONE; + + schedule_work(&crc64_rocksoft_rehash_work); + return NOTIFY_OK; +} + +static void crc64_rocksoft_rehash(struct work_struct *work) +{ + struct crypto_shash *new, *old; + + mutex_lock(&crc64_rocksoft_mutex); + old = rcu_dereference_protected(crc64_rocksoft_tfm, + lockdep_is_held(&crc64_rocksoft_mutex)); + new = crypto_alloc_shash(CRC64_ROCKSOFT_STRING, 0, 0); + if (IS_ERR(new)) { + mutex_unlock(&crc64_rocksoft_mutex); + return; + } + rcu_assign_pointer(crc64_rocksoft_tfm, new); + mutex_unlock(&crc64_rocksoft_mutex); + + if (old) { + synchronize_rcu(); + crypto_free_shash(old); + } else { + static_branch_disable(&crc64_rocksoft_fallback); + } +} + +static struct notifier_block crc64_rocksoft_nb = { + .notifier_call = crc64_rocksoft_notify, +}; + +u64 
crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len) +{ + struct { + struct shash_desc shash; + u64 crc; + } desc; + int err; + + if (static_branch_unlikely(&crc64_rocksoft_fallback)) + return crc64_rocksoft_generic(crc, buffer, len); + + rcu_read_lock(); + desc.shash.tfm = rcu_dereference(crc64_rocksoft_tfm); + desc.crc = crc; + err = crypto_shash_update(&desc.shash, buffer, len); + rcu_read_unlock(); + + BUG_ON(err); + + return desc.crc; +} +EXPORT_SYMBOL_GPL(crc64_rocksoft_update); + +u64 crc64_rocksoft(const unsigned char *buffer, size_t len) +{ + return crc64_rocksoft_update(0, buffer, len); +} +EXPORT_SYMBOL_GPL(crc64_rocksoft); + +static int __init crc64_rocksoft_mod_init(void) +{ + INIT_WORK(&crc64_rocksoft_rehash_work, crc64_rocksoft_rehash); + crypto_register_notifier(&crc64_rocksoft_nb); + crc64_rocksoft_rehash(&crc64_rocksoft_rehash_work); + return 0; +} + +static void __exit crc64_rocksoft_mod_fini(void) +{ + crypto_unregister_notifier(&crc64_rocksoft_nb); + cancel_work_sync(&crc64_rocksoft_rehash_work); + crypto_free_shash(rcu_dereference_protected(crc64_rocksoft_tfm, 1)); +} + +module_init(crc64_rocksoft_mod_init); +module_exit(crc64_rocksoft_mod_fini); + +static int crc64_rocksoft_transform_show(char *buffer, const struct kernel_param *kp) +{ + struct crypto_shash *tfm; + int len; + + if (static_branch_unlikely(&crc64_rocksoft_fallback)) + return sprintf(buffer, "fallback\n"); + + rcu_read_lock(); + tfm = rcu_dereference(crc64_rocksoft_tfm); + len = snprintf(buffer, PAGE_SIZE, "%s\n", + crypto_shash_driver_name(tfm)); + rcu_read_unlock(); + + return len; +} + +module_param_call(transform, NULL, crc64_rocksoft_transform_show, NULL, 0444); + +MODULE_AUTHOR("Keith Busch "); +MODULE_DESCRIPTION("Rocksoft model CRC64 calculation (library API)"); +MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: crc64"); -- cgit v1.2.3-71-gd317 From a7d4383f17e10f338ea757a849f02298790d24fb Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 3 Mar 2022 12:13:11 -0800 Subject: block: add pi for extended integrity The NVMe specification defines new data integrity formats beyond the t10 tuple. Add support for the specification defined CRC64 formats, assuming the reference tag does not need to be split with the "storage tag". Cc: Hannes Reinecke Cc: "Martin K. Petersen" Signed-off-by: Keith Busch Reviewed-by: Martin K. 
Petersen Link: https://lore.kernel.org/r/20220303201312.3255347-8-kbusch@kernel.org Signed-off-by: Jens Axboe --- block/Kconfig | 1 + block/t10-pi.c | 194 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/t10-pi.h | 20 +++++ 3 files changed, 215 insertions(+) (limited to 'include/linux') diff --git a/block/Kconfig b/block/Kconfig index 7eb5d6d53b3f..50b17e260fa2 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -73,6 +73,7 @@ config BLK_DEV_INTEGRITY_T10 tristate depends on BLK_DEV_INTEGRITY select CRC_T10DIF + select CRC64_ROCKSOFT config BLK_DEV_ZONED bool "Zoned block device support" diff --git a/block/t10-pi.c b/block/t10-pi.c index 758a76518854..914d8cddd43a 100644 --- a/block/t10-pi.c +++ b/block/t10-pi.c @@ -7,8 +7,10 @@ #include #include #include +#include #include #include +#include typedef __be16 (csum_fn) (void *, unsigned int); @@ -278,4 +280,196 @@ const struct blk_integrity_profile t10_pi_type3_ip = { }; EXPORT_SYMBOL(t10_pi_type3_ip); +static __be64 ext_pi_crc64(void *data, unsigned int len) +{ + return cpu_to_be64(crc64_rocksoft(data, len)); +} + +static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter, + enum t10_dif_type type) +{ + unsigned int i; + + for (i = 0 ; i < iter->data_size ; i += iter->interval) { + struct crc64_pi_tuple *pi = iter->prot_buf; + + pi->guard_tag = ext_pi_crc64(iter->data_buf, iter->interval); + pi->app_tag = 0; + + if (type == T10_PI_TYPE1_PROTECTION) + put_unaligned_be48(iter->seed, pi->ref_tag); + else + put_unaligned_be48(0ULL, pi->ref_tag); + + iter->data_buf += iter->interval; + iter->prot_buf += iter->tuple_size; + iter->seed++; + } + + return BLK_STS_OK; +} + +static bool ext_pi_ref_escape(u8 *ref_tag) +{ + static u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0; +} + +static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter, + enum t10_dif_type type) +{ + unsigned int i; + + for (i = 0; i < iter->data_size; i += iter->interval) { + struct crc64_pi_tuple *pi = iter->prot_buf; + u64 ref, seed; + __be64 csum; + + if (type == T10_PI_TYPE1_PROTECTION) { + if (pi->app_tag == T10_PI_APP_ESCAPE) + goto next; + + ref = get_unaligned_be48(pi->ref_tag); + seed = lower_48_bits(iter->seed); + if (ref != seed) { + pr_err("%s: ref tag error at location %llu (rcvd %llu)\n", + iter->disk_name, seed, ref); + return BLK_STS_PROTECTION; + } + } else if (type == T10_PI_TYPE3_PROTECTION) { + if (pi->app_tag == T10_PI_APP_ESCAPE && + ext_pi_ref_escape(pi->ref_tag)) + goto next; + } + + csum = ext_pi_crc64(iter->data_buf, iter->interval); + if (pi->guard_tag != csum) { + pr_err("%s: guard tag error at sector %llu " \ + "(rcvd %016llx, want %016llx)\n", + iter->disk_name, (unsigned long long)iter->seed, + be64_to_cpu(pi->guard_tag), be64_to_cpu(csum)); + return BLK_STS_PROTECTION; + } + +next: + iter->data_buf += iter->interval; + iter->prot_buf += iter->tuple_size; + iter->seed++; + } + + return BLK_STS_OK; +} + +static blk_status_t ext_pi_type1_verify_crc64(struct blk_integrity_iter *iter) +{ + return ext_pi_crc64_verify(iter, T10_PI_TYPE1_PROTECTION); +} + +static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter) +{ + return ext_pi_crc64_generate(iter, T10_PI_TYPE1_PROTECTION); +} + +static void ext_pi_type1_prepare(struct request *rq) +{ + const int tuple_sz = rq->q->integrity.tuple_size; + u64 ref_tag = ext_pi_ref_tag(rq); + struct bio *bio; + + __rq_for_each_bio(bio, rq) { + struct bio_integrity_payload *bip = 
bio_integrity(bio); + u64 virt = lower_48_bits(bip_get_seed(bip)); + struct bio_vec iv; + struct bvec_iter iter; + + /* Already remapped? */ + if (bip->bip_flags & BIP_MAPPED_INTEGRITY) + break; + + bip_for_each_vec(iv, bip, iter) { + unsigned int j; + void *p; + + p = bvec_kmap_local(&iv); + for (j = 0; j < iv.bv_len; j += tuple_sz) { + struct crc64_pi_tuple *pi = p; + u64 ref = get_unaligned_be48(pi->ref_tag); + + if (ref == virt) + put_unaligned_be48(ref_tag, pi->ref_tag); + virt++; + ref_tag++; + p += tuple_sz; + } + kunmap_local(p); + } + + bip->bip_flags |= BIP_MAPPED_INTEGRITY; + } +} + +static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes) +{ + unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp; + const int tuple_sz = rq->q->integrity.tuple_size; + u64 ref_tag = ext_pi_ref_tag(rq); + struct bio *bio; + + __rq_for_each_bio(bio, rq) { + struct bio_integrity_payload *bip = bio_integrity(bio); + u64 virt = lower_48_bits(bip_get_seed(bip)); + struct bio_vec iv; + struct bvec_iter iter; + + bip_for_each_vec(iv, bip, iter) { + unsigned int j; + void *p; + + p = bvec_kmap_local(&iv); + for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) { + struct crc64_pi_tuple *pi = p; + u64 ref = get_unaligned_be48(pi->ref_tag); + + if (ref == ref_tag) + put_unaligned_be48(virt, pi->ref_tag); + virt++; + ref_tag++; + intervals--; + p += tuple_sz; + } + kunmap_local(p); + } + } +} + +static blk_status_t ext_pi_type3_verify_crc64(struct blk_integrity_iter *iter) +{ + return ext_pi_crc64_verify(iter, T10_PI_TYPE3_PROTECTION); +} + +static blk_status_t ext_pi_type3_generate_crc64(struct blk_integrity_iter *iter) +{ + return ext_pi_crc64_generate(iter, T10_PI_TYPE3_PROTECTION); +} + +const struct blk_integrity_profile ext_pi_type1_crc64 = { + .name = "EXT-DIF-TYPE1-CRC64", + .generate_fn = ext_pi_type1_generate_crc64, + .verify_fn = ext_pi_type1_verify_crc64, + .prepare_fn = ext_pi_type1_prepare, + .complete_fn = ext_pi_type1_complete, +}; +EXPORT_SYMBOL_GPL(ext_pi_type1_crc64); + +const struct blk_integrity_profile ext_pi_type3_crc64 = { + .name = "EXT-DIF-TYPE3-CRC64", + .generate_fn = ext_pi_type3_generate_crc64, + .verify_fn = ext_pi_type3_verify_crc64, + .prepare_fn = t10_pi_type3_prepare, + .complete_fn = t10_pi_type3_complete, +}; +EXPORT_SYMBOL_GPL(ext_pi_type3_crc64); + +MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL"); diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index c635c2e014e3..a4b1af581f69 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -53,4 +53,24 @@ extern const struct blk_integrity_profile t10_pi_type1_ip; extern const struct blk_integrity_profile t10_pi_type3_crc; extern const struct blk_integrity_profile t10_pi_type3_ip; +struct crc64_pi_tuple { + __be64 guard_tag; + __be16 app_tag; + __u8 ref_tag[6]; +}; + +static inline u64 ext_pi_ref_tag(struct request *rq) +{ + unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + +#ifdef CONFIG_BLK_DEV_INTEGRITY + if (rq->q->integrity.interval_exp) + shift = rq->q->integrity.interval_exp; +#endif + return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT)); +} + +extern const struct blk_integrity_profile ext_pi_type1_crc64; +extern const struct blk_integrity_profile ext_pi_type3_crc64; + #endif -- cgit v1.2.3-71-gd317 From 4020aad85c6785ddac8d51f345ff9e3328ce773a Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 3 Mar 2022 12:13:12 -0800 Subject: nvme: add support for enhanced metadata NVM Express ratified TP 4068 defines new protection information formats. 
Implement support for the CRC64 guard tags. Since the block layer doesn't support variable length reference tags, driver support for the Storage Tag space is not supported at this time. Cc: Hannes Reinecke Cc: "Martin K. Petersen" Cc: Klaus Jensen Signed-off-by: Keith Busch Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20220303201312.3255347-9-kbusch@kernel.org Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 162 ++++++++++++++++++++++++++++++++++++++++------- drivers/nvme/host/nvme.h | 4 +- include/linux/nvme.h | 53 ++++++++++++++-- 3 files changed, 188 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index cd13b7dcd18f..51c08f206cbf 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -813,6 +813,30 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, return BLK_STS_OK; } +static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, + struct request *req) +{ + u32 upper, lower; + u64 ref48; + + /* both rw and write zeroes share the same reftag format */ + switch (ns->guard_type) { + case NVME_NVM_NS_16B_GUARD: + cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); + break; + case NVME_NVM_NS_64B_GUARD: + ref48 = ext_pi_ref_tag(req); + lower = lower_32_bits(ref48); + upper = upper_32_bits(ref48); + + cmnd->rw.reftag = cpu_to_le32(lower); + cmnd->rw.cdw3 = cpu_to_le32(upper); + break; + default: + break; + } +} + static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd) { @@ -834,8 +858,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, switch (ns->pi_type) { case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: - cmnd->write_zeroes.reftag = - cpu_to_le32(t10_pi_ref_tag(req)); + nvme_set_ref_tag(ns, cmnd, req); break; } } @@ -861,7 +884,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, cmnd->rw.opcode = op; cmnd->rw.flags = 0; cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); - cmnd->rw.rsvd2 = 0; + cmnd->rw.cdw2 = 0; + cmnd->rw.cdw3 = 0; cmnd->rw.metadata = 0; cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); @@ -892,7 +916,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, NVME_RW_PRINFO_PRCHK_REF; if (op == nvme_cmd_zone_append) control |= NVME_RW_APPEND_PIREMAP; - cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); + nvme_set_ref_tag(ns, cmnd, req); break; } } @@ -1544,33 +1568,58 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) } #ifdef CONFIG_BLK_DEV_INTEGRITY -static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type, +static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, u32 max_integrity_segments) { struct blk_integrity integrity = { }; - switch (pi_type) { + switch (ns->pi_type) { case NVME_NS_DPS_PI_TYPE3: - integrity.profile = &t10_pi_type3_crc; - integrity.tag_size = sizeof(u16) + sizeof(u32); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + switch (ns->guard_type) { + case NVME_NVM_NS_16B_GUARD: + integrity.profile = &t10_pi_type3_crc; + integrity.tag_size = sizeof(u16) + sizeof(u32); + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + break; + case NVME_NVM_NS_64B_GUARD: + integrity.profile = &ext_pi_type3_crc64; + integrity.tag_size = sizeof(u16) + 6; + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + break; + default: + integrity.profile = NULL; + 
break; + } break; case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: - integrity.profile = &t10_pi_type1_crc; - integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + switch (ns->guard_type) { + case NVME_NVM_NS_16B_GUARD: + integrity.profile = &t10_pi_type1_crc; + integrity.tag_size = sizeof(u16); + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + break; + case NVME_NVM_NS_64B_GUARD: + integrity.profile = &ext_pi_type1_crc64; + integrity.tag_size = sizeof(u16); + integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + break; + default: + integrity.profile = NULL; + break; + } break; default: integrity.profile = NULL; break; } - integrity.tuple_size = ms; + + integrity.tuple_size = ns->ms; blk_integrity_register(disk, &integrity); blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); } #else -static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type, +static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, u32 max_integrity_segments) { } @@ -1612,17 +1661,73 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) a->csi == b->csi; } -static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) +static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) { + bool first = id->dps & NVME_NS_DPS_PI_FIRST; + unsigned lbaf = nvme_lbaf_index(id->flbas); struct nvme_ctrl *ctrl = ns->ctrl; + struct nvme_command c = { }; + struct nvme_id_ns_nvm *nvm; + int ret = 0; + u32 elbaf; + + ns->pi_size = 0; + ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); + if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { + ns->pi_size = sizeof(struct t10_pi_tuple); + ns->guard_type = NVME_NVM_NS_16B_GUARD; + goto set_pi; + } + + nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); + if (!nvm) + return -ENOMEM; - ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); - if (id->dps & NVME_NS_DPS_PI_FIRST || - ns->ms == sizeof(struct t10_pi_tuple)) + c.identify.opcode = nvme_admin_identify; + c.identify.nsid = cpu_to_le32(ns->head->ns_id); + c.identify.cns = NVME_ID_CNS_CS_NS; + c.identify.csi = NVME_CSI_NVM; + + ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm)); + if (ret) + goto free_data; + + elbaf = le32_to_cpu(nvm->elbaf[lbaf]); + + /* no support for storage tag formats right now */ + if (nvme_elbaf_sts(elbaf)) + goto free_data; + + ns->guard_type = nvme_elbaf_guard_type(elbaf); + switch (ns->guard_type) { + case NVME_NVM_NS_64B_GUARD: + ns->pi_size = sizeof(struct crc64_pi_tuple); + break; + case NVME_NVM_NS_16B_GUARD: + ns->pi_size = sizeof(struct t10_pi_tuple); + break; + default: + break; + } + +free_data: + kfree(nvm); +set_pi: + if (ns->pi_size && (first || ns->ms == ns->pi_size)) ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; else ns->pi_type = 0; + return ret; +} + +static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + + if (nvme_init_ms(ns, id)) + return; + ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) return; @@ -1738,7 +1843,7 @@ static void nvme_update_disk_info(struct gendisk *disk, if (ns->ms) { if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && (ns->features & NVME_NS_METADATA_SUPPORTED)) - nvme_init_integrity(disk, ns->ms, ns->pi_type, + nvme_init_integrity(disk, ns, ns->ctrl->max_integrity_segments); else if (!nvme_ns_has_pi(ns)) capacity = 0; @@ -1793,7 +1898,7 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, 
struct nvme_id_ns *id) static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) { - unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; + unsigned lbaf = nvme_lbaf_index(id->flbas); int ret; blk_mq_freeze_queue(ns->disk->queue); @@ -2138,20 +2243,27 @@ static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) return ret; } -static int nvme_configure_acre(struct nvme_ctrl *ctrl) +static int nvme_configure_host_options(struct nvme_ctrl *ctrl) { struct nvme_feat_host_behavior *host; + u8 acre = 0, lbafee = 0; int ret; /* Don't bother enabling the feature if retry delay is not reported */ - if (!ctrl->crdt[0]) + if (ctrl->crdt[0]) + acre = NVME_ENABLE_ACRE; + if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) + lbafee = NVME_ENABLE_LBAFEE; + + if (!acre && !lbafee) return 0; host = kzalloc(sizeof(*host), GFP_KERNEL); if (!host) return 0; - host->acre = NVME_ENABLE_ACRE; + host->acre = acre; + host->lbafee = lbafee; ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, host, sizeof(*host), NULL); kfree(host); @@ -2989,7 +3101,7 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl) if (ret < 0) return ret; - ret = nvme_configure_acre(ctrl); + ret = nvme_configure_host_options(ctrl); if (ret < 0) return ret; @@ -4703,12 +4815,14 @@ static inline void _nvme_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); + BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 1bed663322ee..e7ccdb119ede 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -453,9 +453,11 @@ struct nvme_ns { int lba_shift; u16 ms; + u16 pi_size; u16 sgs; u32 sws; u8 pi_type; + u8 guard_type; #ifdef CONFIG_BLK_DEV_ZONED u64 zsze; #endif @@ -478,7 +480,7 @@ struct nvme_ns { /* NVMe ns supports metadata actions by the controller (generate/strip) */ static inline bool nvme_ns_has_pi(struct nvme_ns *ns) { - return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple); + return ns->pi_type && ns->ms == ns->pi_size; } struct nvme_ctrl_ops { diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 9dbc3ef4daf7..4f44f83817a9 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -244,6 +244,7 @@ enum { enum nvme_ctrl_attr { NVME_CTRL_ATTR_HID_128_BIT = (1 << 0), NVME_CTRL_ATTR_TBKAS = (1 << 6), + NVME_CTRL_ATTR_ELBAS = (1 << 15), }; struct nvme_id_ctrl { @@ -399,8 +400,7 @@ struct nvme_id_ns { __le16 endgid; __u8 nguid[16]; __u8 eui64[8]; - struct nvme_lbaf lbaf[16]; - __u8 rsvd192[192]; + struct nvme_lbaf lbaf[64]; __u8 vs[3712]; }; @@ -418,8 +418,7 @@ struct nvme_id_ns_zns { __le32 rrl; __le32 frl; __u8 rsvd20[2796]; - struct nvme_zns_lbafe lbafe[16]; - __u8 rsvd3072[768]; + struct nvme_zns_lbafe lbafe[64]; __u8 vs[256]; }; @@ -428,6 +427,30 @@ struct nvme_id_ctrl_zns { __u8 rsvd1[4095]; }; +struct nvme_id_ns_nvm { + __le64 lbstm; + __u8 pic; + __u8 rsvd9[3]; + __le32 elbaf[64]; + __u8 rsvd268[3828]; +}; + +enum { + 
NVME_ID_NS_NVM_STS_MASK = 0x3f, + NVME_ID_NS_NVM_GUARD_SHIFT = 7, + NVME_ID_NS_NVM_GUARD_MASK = 0x3, +}; + +static inline __u8 nvme_elbaf_sts(__u32 elbaf) +{ + return elbaf & NVME_ID_NS_NVM_STS_MASK; +} + +static inline __u8 nvme_elbaf_guard_type(__u32 elbaf) +{ + return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK; +} + struct nvme_id_ctrl_nvm { __u8 vsl; __u8 wzsl; @@ -478,6 +501,8 @@ enum { NVME_NS_FEAT_IO_OPT = 1 << 4, NVME_NS_ATTR_RO = 1 << 0, NVME_NS_FLBAS_LBA_MASK = 0xf, + NVME_NS_FLBAS_LBA_UMASK = 0x60, + NVME_NS_FLBAS_LBA_SHIFT = 1, NVME_NS_FLBAS_META_EXT = 0x10, NVME_NS_NMIC_SHARED = 1 << 0, NVME_LBAF_RP_BEST = 0, @@ -496,6 +521,18 @@ enum { NVME_NS_DPS_PI_TYPE3 = 3, }; +enum { + NVME_NVM_NS_16B_GUARD = 0, + NVME_NVM_NS_32B_GUARD = 1, + NVME_NVM_NS_64B_GUARD = 2, +}; + +static inline __u8 nvme_lbaf_index(__u8 flbas) +{ + return (flbas & NVME_NS_FLBAS_LBA_MASK) | + ((flbas & NVME_NS_FLBAS_LBA_UMASK) >> NVME_NS_FLBAS_LBA_SHIFT); +} + /* Identify Namespace Metadata Capabilities (MC): */ enum { NVME_MC_EXTENDED_LBA = (1 << 0), @@ -842,7 +879,8 @@ struct nvme_rw_command { __u8 flags; __u16 command_id; __le32 nsid; - __u64 rsvd2; + __le32 cdw2; + __le32 cdw3; __le64 metadata; union nvme_data_ptr dptr; __le64 slba; @@ -996,11 +1034,14 @@ enum { struct nvme_feat_host_behavior { __u8 acre; - __u8 resv1[511]; + __u8 etdas; + __u8 lbafee; + __u8 resv1[509]; }; enum { NVME_ENABLE_ACRE = 1, + NVME_ENABLE_LBAFEE = 1, }; /* Admin commands */ -- cgit v1.2.3-71-gd317 From 2984539959dbaf4e65e19bf90c2419304a81a985 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 8 Feb 2022 17:16:33 +0100 Subject: tick/rcu: Remove obsolete rcu_needs_cpu() parameters With the removal of CONFIG_RCU_FAST_NO_HZ, the parameters in rcu_needs_cpu() are not necessary anymore. Simply remove them. Signed-off-by: Frederic Weisbecker Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Paul E. McKenney Cc: Paul Menzel --- include/linux/rcutiny.h | 3 +-- include/linux/rcutree.h | 2 +- kernel/rcu/tree.c | 3 +-- kernel/time/tick-sched.c | 10 ++++------ 4 files changed, 7 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 858f4d429946..5fed476f977f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -64,9 +64,8 @@ static inline void rcu_softirq_qs(void) rcu_tasks_qs(current, (preempt)); \ } while (0) -static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) +static inline int rcu_needs_cpu(void) { - *nextevt = KTIME_MAX; return 0; } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 53209d669400..6cc91291d078 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -19,7 +19,7 @@ void rcu_softirq_qs(void); void rcu_note_context_switch(bool preempt); -int rcu_needs_cpu(u64 basem, u64 *nextevt); +int rcu_needs_cpu(void); void rcu_cpu_stall_reset(void); /* diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index a4c25a6283b0..80faf2273ce9 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1086,9 +1086,8 @@ void rcu_irq_enter_irqson(void) * Just check whether or not this CPU has non-offloaded RCU callbacks * queued. 
*/ -int rcu_needs_cpu(u64 basemono, u64 *nextevt) +int rcu_needs_cpu(void) { - *nextevt = KTIME_MAX; return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data)); } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index c89f50a7e690..566ad5bd83e9 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -785,7 +785,7 @@ static inline bool local_timer_softirq_pending(void) static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) { - u64 basemono, next_tick, next_tmr, next_rcu, delta, expires; + u64 basemono, next_tick, delta, expires; unsigned long basejiff; unsigned int seq; @@ -808,7 +808,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) * minimal delta which brings us back to this place * immediately. Lather, rinse and repeat... */ - if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || + if (rcu_needs_cpu() || arch_needs_cpu() || irq_work_needs_cpu() || local_timer_softirq_pending()) { next_tick = basemono + TICK_NSEC; } else { @@ -819,10 +819,8 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) * disabled this also looks at the next expiring * hrtimer. */ - next_tmr = get_next_timer_interrupt(basejiff, basemono); - ts->next_timer = next_tmr; - /* Take the next rcu event into account */ - next_tick = next_rcu < next_tmr ? next_rcu : next_tmr; + next_tick = get_next_timer_interrupt(basejiff, basemono); + ts->next_timer = next_tick; } /* -- cgit v1.2.3-71-gd317 From 0345691b24c076655ce8f0f4bfd24cba3467ccbd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 8 Feb 2022 17:16:34 +0100 Subject: tick/rcu: Stop allowing RCU_SOFTIRQ in idle RCU_SOFTIRQ used to be special in that it could be raised on purpose within the idle path to prevent the tick from stopping. Some code still guards against unnecessary warnings related to this specific behaviour when entering dynticks-idle mode. However the nohz layout has changed quite a bit in ten years, and the removal of CONFIG_RCU_FAST_NO_HZ was the final straw for this special exemption. Now the RCU_SOFTIRQ vector is expected to be raised from sane places. One remaining corner case is admitted, though, when the vector is invoked on the fragile hotplug path. Signed-off-by: Frederic Weisbecker Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Paul E. McKenney Cc: Paul Menzel --- include/linux/interrupt.h | 8 +++++++- kernel/time/tick-sched.c | 50 +++++++++++++++++++++++++++++++++++++---------- 2 files changed, 47 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 9367f1cb2e3c..9613326d2f8a 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -579,7 +579,13 @@ enum NR_SOFTIRQS }; -#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) +/* + * Ignoring the RCU vector after ksoftirqd is parked is fine + * because: + * 1) rcutree_migrate_callbacks() takes care of the queue. + * 2) rcu_report_dead() reports the final quiescent states. + */ +#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ)) /* map softirq index to softirq name. update 'softirq_to_name' in * kernel/softirq.c when adding a new softirq.
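Note the polarity change: the old SOFTIRQ_STOP_IDLE_MASK was a negated mask that directly selected the offending vectors, while the new SOFTIRQ_HOTPLUG_SAFE_MASK positively names the vectors that may be ignored, so callers clear it before testing. A minimal sketch of the intended check (hypothetical helper, not in the patch):

/* True if everything still pending is handled elsewhere once
 * ksoftirqd is parked.
 */
static bool softirq_pending_is_hotplug_safe(unsigned int pending)
{
	return !(pending & ~SOFTIRQ_HOTPLUG_SAFE_MASK);
}

This is exactly the test report_idle_softirq() performs below for CPUs that are no longer active.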
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 566ad5bd83e9..2d76c91b85de 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -999,6 +999,45 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts) __tick_nohz_full_update_tick(ts, ktime_get()); } +/* + * A pending softirq outside an IRQ (or softirq disabled section) context + * should be waiting for ksoftirqd to handle it. Therefore we shouldn't + * reach here due to the need_resched() early check in can_stop_idle_tick(). + * + * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPUHP_TEARDOWN_CPU + * during the cpu_down() process, softirqs can still be raised while ksoftirqd + * is parked, triggering the warning below since wakeup_softirqd() is ignored. + * + */ +static bool report_idle_softirq(void) +{ + static int ratelimit; + unsigned int pending = local_softirq_pending(); + + if (likely(!pending)) + return false; + + /* Some softirqs claim to be safe against hotplug and ksoftirqd parking */ + if (!cpu_active(smp_processor_id())) { + pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK; + if (!pending) + return false; + } + + if (ratelimit >= 10) + return false; + + /* On RT, softirq handling may be waiting on some lock */ + if (local_bh_blocked()) + return false; + + pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n", + pending); + ratelimit++; + + return true; +} + static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) { /* @@ -1025,17 +1064,8 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) if (need_resched()) return false; - if (unlikely(local_softirq_pending())) { - static int ratelimit; - - if (ratelimit < 10 && !local_bh_blocked() && - (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { - pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n", - (unsigned int) local_softirq_pending()); - ratelimit++; - } + if (unlikely(report_idle_softirq())) return false; - } if (tick_nohz_full_enabled()) { /* -- cgit v1.2.3-71-gd317 From f96272a90d9eaea9933aaab704ddbd258feb3841 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 8 Feb 2022 17:16:35 +0100 Subject: lib/irq_poll: Declare IRQ_POLL softirq vector as ksoftirqd-parking safe The following warning may appear while setting a CPU down: NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #20!!! The IRQ_POLL_SOFTIRQ vector can be raised during the hotplug cpu_down() path after ksoftirqd is parked and before the CPU actually dies. However this is handled afterward at the CPUHP_IRQ_POLL_DEAD stage where the queue gets migrated. Hence this warning can be considered spurious and the vector can join the "hotplug-safe" list. Reported-and-tested-by: Paul Menzel Signed-off-by: Frederic Weisbecker Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Paul E. McKenney Cc: Paul Menzel --- include/linux/interrupt.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 9613326d2f8a..f40754caaefa 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -580,12 +580,15 @@ enum }; /* - * Ignoring the RCU vector after ksoftirqd is parked is fine - * because: - * 1) rcutree_migrate_callbacks() takes care of the queue. + * The following vectors can be safely ignored after ksoftirqd is parked: + * + * _ RCU: + * 1) rcutree_migrate_callbacks() migrates the queue. + * 2) rcu_report_dead() reports the final quiescent states.
+ * + * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue */ -#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ)) +#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ)) /* map softirq index to softirq name. update 'softirq_to_name' in * kernel/softirq.c when adding a new softirq. -- cgit v1.2.3-71-gd317 From b0e846248de5bd57e4b785791f359ecb8f2d7311 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Mon, 27 Dec 2021 07:48:39 +0100 Subject: mfd: db8500-prcmu: Remove dead code for a non-existing config The config DBX500_PRCMU_QOS_POWER was never introduced in the kernel repository. So, the ifdef in ./include/linux/mfd/dbx500-prcmu.h was never effective. Remove these dead function prototypes. Signed-off-by: Lukas Bulwahn Reviewed-by: Linus Walleij Signed-off-by: Lee Jones Link: https://lore.kernel.org/r/20211227064839.21405-1-lukas.bulwahn@gmail.com --- include/linux/mfd/dbx500-prcmu.h | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h index cbf9d7619493..2a255362b5ac 100644 --- a/include/linux/mfd/dbx500-prcmu.h +++ b/include/linux/mfd/dbx500-prcmu.h @@ -556,22 +556,6 @@ static inline void prcmu_clear(unsigned int reg, u32 bits) #define PRCMU_QOS_ARM_OPP 3 #define PRCMU_QOS_DEFAULT_VALUE -1 -#ifdef CONFIG_DBX500_PRCMU_QOS_POWER - -unsigned long prcmu_qos_get_cpufreq_opp_delay(void); -void prcmu_qos_set_cpufreq_opp_delay(unsigned long); -void prcmu_qos_force_opp(int, s32); -int prcmu_qos_requirement(int pm_qos_class); -int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value); -int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value); -void prcmu_qos_remove_requirement(int pm_qos_class, char *name); -int prcmu_qos_add_notifier(int prcmu_qos_class, - struct notifier_block *notifier); -int prcmu_qos_remove_notifier(int prcmu_qos_class, - struct notifier_block *notifier); - -#else - static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void) { return 0; @@ -613,6 +597,4 @@ static inline int prcmu_qos_remove_notifier(int prcmu_qos_class, return 0; } -#endif - #endif /* __MACH_PRCMU_H */ -- cgit v1.2.3-71-gd317 From 56f216d8efbc1212bf5ff8a6ff5e29927965e8db Mon Sep 17 00:00:00 2001 From: Peter Geis Date: Tue, 8 Feb 2022 14:40:23 -0500 Subject: mfd: rk808: Add reboot support to rk808.c This adds reboot support to the rk808 PMIC driver and enables it for the rk809 and rk817 devices. It is only enabled if the rockchip,system-power-controller flag is set.
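For context, a sketch of how such a handler gets invoked (simplified from kernel/reboot.c, shown for illustration only): the restart path walks an atomic notifier chain in descending priority order, so the priority of 192 chosen below runs ahead of default 128-priority handlers, and returning NOTIFY_DONE lets lower-priority fallbacks try if the register write fails.

/* Simplified sketch of the chain walk in kernel/reboot.c */
void do_kernel_restart(char *cmd)
{
	atomic_notifier_call_chain(&restart_handler_list, reboot_mode, cmd);
}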
Signed-off-by: Peter Geis Signed-off-by: Frank Wunderlich Reviewed-by: Dmitry Osipenko Signed-off-by: Lee Jones Link: https://lore.kernel.org/r/20220208194023.929720-1-pgwipeout@gmail.com --- drivers/mfd/rk808.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/rk808.h | 1 + 2 files changed, 45 insertions(+) (limited to 'include/linux') diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c index b181fe401330..4142b638e5fa 100644 --- a/drivers/mfd/rk808.c +++ b/drivers/mfd/rk808.c @@ -19,6 +19,7 @@ #include #include #include +#include struct rk808_reg_data { int addr; @@ -543,6 +544,7 @@ static void rk808_pm_power_off(void) reg = RK808_DEVCTRL_REG, bit = DEV_OFF_RST; break; + case RK809_ID: case RK817_ID: reg = RK817_SYS_CFG(3); bit = DEV_OFF; @@ -559,6 +561,34 @@ static void rk808_pm_power_off(void) dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); } +static int rk808_restart_notify(struct notifier_block *this, unsigned long mode, void *cmd) +{ + struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); + unsigned int reg, bit; + int ret; + + switch (rk808->variant) { + case RK809_ID: + case RK817_ID: + reg = RK817_SYS_CFG(3); + bit = DEV_RST; + break; + + default: + return NOTIFY_DONE; + } + ret = regmap_update_bits(rk808->regmap, reg, bit, bit); + if (ret) + dev_err(&rk808_i2c_client->dev, "Failed to restart device!\n"); + + return NOTIFY_DONE; +} + +static struct notifier_block rk808_restart_handler = { + .notifier_call = rk808_restart_notify, + .priority = 192, +}; + static void rk8xx_shutdown(struct i2c_client *client) { struct rk808 *rk808 = i2c_get_clientdata(client); @@ -727,6 +757,18 @@ static int rk808_probe(struct i2c_client *client, if (of_property_read_bool(np, "rockchip,system-power-controller")) { rk808_i2c_client = client; pm_power_off = rk808_pm_power_off; + + switch (rk808->variant) { + case RK809_ID: + case RK817_ID: + ret = register_restart_handler(&rk808_restart_handler); + if (ret) + dev_warn(&client->dev, "failed to register rst handler, %d\n", ret); + break; + default: + dev_dbg(&client->dev, "pmic controlled board reset not supported\n"); + break; + } } return 0; @@ -749,6 +791,8 @@ static int rk808_remove(struct i2c_client *client) if (pm_power_off == rk808_pm_power_off) pm_power_off = NULL; + unregister_restart_handler(&rk808_restart_handler); + return 0; } diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index a96e6d43ca06..58602032e642 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -373,6 +373,7 @@ enum rk805_reg { #define SWITCH2_EN BIT(6) #define SWITCH1_EN BIT(5) #define DEV_OFF_RST BIT(3) +#define DEV_RST BIT(2) #define DEV_OFF BIT(0) #define RTC_STOP BIT(0) -- cgit v1.2.3-71-gd317 From 68fa55f0e05ce371c4b5de7932d9f570d61bf791 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 11 Feb 2022 10:23:46 +0530 Subject: perf/marvell: cn10k DDR perf event core ownership DDR perf event counters are not per-core, so they should be accessed by only one core at a time. Select a new core when the previously owning core goes offline.
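For background (illustration only): counters on an uncore PMU such as this one are opened CPU-wide rather than per-task, bound to the core that currently owns the PMU, and the hotplug callback added below migrates that ownership so existing events keep counting. A hypothetical userspace sketch, where pmu_type and config are placeholders normally read from sysfs:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open one counter on the owning core: pid == -1 with a valid cpu
 * requests a CPU-wide (uncore) event.
 */
static int open_uncore_counter(int pmu_type, unsigned long long config,
			       int owner_cpu)
{
	struct perf_event_attr attr = {
		.type = pmu_type,
		.size = sizeof(attr),
		.config = config,
	};

	return syscall(__NR_perf_event_open, &attr, -1, owner_cpu, -1, 0);
}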
Signed-off-by: Bharat Bhushan Reviewed-by: Bhaskara Budiredla Link: https://lore.kernel.org/r/20220211045346.17894-5-bbhushan2@marvell.com Signed-off-by: Will Deacon --- drivers/perf/Kconfig | 7 +++++ drivers/perf/marvell_cn10k_ddr_pmu.c | 50 ++++++++++++++++++++++++++++++++++-- include/linux/cpuhotplug.h | 1 + 3 files changed, 56 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 7d6ffdf44a41..b9deef6ed423 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -148,4 +148,11 @@ config MARVELL_CN10K_TAD_PMU source "drivers/perf/hisilicon/Kconfig" +config MARVELL_CN10K_DDR_PMU + tristate "Enable MARVELL CN10K DRAM Subsystem(DSS) PMU Support" + depends on ARM64 || (COMPILE_TEST && 64BIT) + help + Enable perf support for Marvell DDR Performance monitoring + event on CN10K platform. + endmenu diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c index 19c8744e1a23..7f3146e71f99 100644 --- a/drivers/perf/marvell_cn10k_ddr_pmu.c +++ b/drivers/perf/marvell_cn10k_ddr_pmu.c @@ -129,6 +129,7 @@ struct cn10k_ddr_pmu { int active_events; struct perf_event *events[DDRC_PERF_NUM_COUNTERS]; struct hrtimer hrtimer; + struct hlist_node node; }; #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu) @@ -610,6 +611,24 @@ static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer) return HRTIMER_RESTART; } +static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu, + node); + unsigned int target; + + if (cpu != pmu->cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&pmu->pmu, cpu, target); + pmu->cpu = target; + return 0; +} + static int cn10k_ddr_perf_probe(struct platform_device *pdev) { struct cn10k_ddr_pmu *ddr_pmu; @@ -661,18 +680,31 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev) hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler; + cpuhp_state_add_instance_nocalls( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + &ddr_pmu->node); + ret = perf_pmu_register(&ddr_pmu->pmu, name, -1); if (ret) - return ret; + goto error; pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start); return 0; +error: + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + &ddr_pmu->node); + return ret; } static int cn10k_ddr_perf_remove(struct platform_device *pdev) { struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + &ddr_pmu->node); + perf_pmu_unregister(&ddr_pmu->pmu); return 0; } @@ -697,12 +729,26 @@ static struct platform_driver cn10k_ddr_pmu_driver = { static int __init cn10k_ddr_pmu_init(void) { - return platform_driver_register(&cn10k_ddr_pmu_driver); + int ret; + + ret = cpuhp_setup_state_multi( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + "perf/marvell/cn10k/ddr:online", NULL, + cn10k_ddr_pmu_offline_cpu); + if (ret) + return ret; + + ret = platform_driver_register(&cn10k_ddr_pmu_driver); + if (ret) + cpuhp_remove_multi_state( + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE); + return ret; } static void __exit cn10k_ddr_pmu_exit(void) { platform_driver_unregister(&cn10k_ddr_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE); } 
module_init(cn10k_ddr_pmu_init); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 411a428ace4d..2bc550ac8dc7 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -231,6 +231,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, -- cgit v1.2.3-71-gd317 From 64807c2321512f67959e51f09e302c145c336184 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Mon, 7 Mar 2022 21:45:14 +0530 Subject: net: phy: exported the genphy_read_master_slave function The genphy_read_master_slave function allows configuring the master/slave setting for gigabit PHYs only. In order to use this function irrespective of speed, move the speed check to the genphy_read_status call. Signed-off-by: Arun Ramadoss Reviewed-by: Andrew Lunn Signed-off-by: Paolo Abeni --- drivers/net/phy/phy_device.c | 19 +++++++++---------- include/linux/phy.h | 1 + 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ce0bb5951b81..8406ac739def 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -2051,17 +2051,11 @@ static int genphy_setup_master_slave(struct phy_device *phydev) CTL1000_PREFER_MASTER), ctl); } -static int genphy_read_master_slave(struct phy_device *phydev) +int genphy_read_master_slave(struct phy_device *phydev) { int cfg, state; int val; - if (!phydev->is_gigabit_capable) { - phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED; - phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; - return 0; - } - phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; @@ -2102,6 +2096,7 @@ static int genphy_read_master_slave(struct phy_device *phydev) return 0; } +EXPORT_SYMBOL(genphy_read_master_slave); /** * genphy_restart_aneg - Enable and Restart Autonegotiation @@ -2396,14 +2391,18 @@ int genphy_read_status(struct phy_device *phydev) if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) return 0; + phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED; + phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; phydev->speed = SPEED_UNKNOWN; phydev->duplex = DUPLEX_UNKNOWN; phydev->pause = 0; phydev->asym_pause = 0; - err = genphy_read_master_slave(phydev); - if (err < 0) - return err; + if (phydev->is_gigabit_capable) { + err = genphy_read_master_slave(phydev); + if (err < 0) + return err; + } err = genphy_read_lpa(phydev); if (err < 0) diff --git a/include/linux/phy.h b/include/linux/phy.h index cd08cf1a8b0d..20beeaa7443b 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1578,6 +1578,7 @@ int genphy_update_link(struct phy_device *phydev); int genphy_read_lpa(struct phy_device *phydev); int genphy_read_status_fixed(struct phy_device *phydev); int genphy_read_status(struct phy_device *phydev); +int genphy_read_master_slave(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); int genphy_loopback(struct phy_device *phydev, bool enable); -- cgit v1.2.3-71-gd317 From 1280f12f56a15abde23503ba876343e5f201c9c2 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 8 Feb 2022 18:56:03 +0000 Subject: drivers/perf: arm_pmu: Handle 47 bit counters The current ARM PMU
framework can only deal with 32 or 64bit counters. Teach it about a 47bit flavour. Yes, this is odd. Reviewed-by: Hector Martin Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- drivers/perf/arm_pmu.c | 2 ++ include/linux/perf/arm_pmu.h | 2 ++ 2 files changed, 4 insertions(+) (limited to 'include/linux') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 295cc7952d0e..0a9ed1a061ac 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -109,6 +109,8 @@ static inline u64 arm_pmu_event_max_period(struct perf_event *event) { if (event->hw.flags & ARMPMU_EVT_64BIT) return GENMASK_ULL(63, 0); + else if (event->hw.flags & ARMPMU_EVT_47BIT) + return GENMASK_ULL(46, 0); else return GENMASK_ULL(31, 0); } diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 2512e2f9cd4e..0407a38b470a 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -26,6 +26,8 @@ */ /* Event uses a 64bit counter */ #define ARMPMU_EVT_64BIT 1 +/* Event uses a 47bit counter */ +#define ARMPMU_EVT_47BIT 2 #define HW_OP_UNSUPPORTED 0xFFFF #define C(_x) PERF_COUNT_HW_CACHE_##_x -- cgit v1.2.3-71-gd317 From a8749a35c39903120ec421ef2525acc8e0daa55c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 8 Mar 2022 04:47:22 -0500 Subject: mm: vmalloc: introduce array allocation functions Linux has dozens of occurrences of vmalloc(array_size()) and vzalloc(array_size()). Allow to simplify the code by providing vmalloc_array and vcalloc, as well as the underscored variants that let the caller specify the GFP flags. Acked-by: Michal Hocko Signed-off-by: Paolo Bonzini --- include/linux/vmalloc.h | 5 +++++ mm/util.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) (limited to 'include/linux') diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 880227b9f044..d1bbd4fd50c5 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -159,6 +159,11 @@ void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) __alloc_size(1); void *vmalloc_no_huge(unsigned long size) __alloc_size(1); +extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); +extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2); +extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); +extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2); + extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); diff --git a/mm/util.c b/mm/util.c index 7e43369064c8..94475abe54a0 100644 --- a/mm/util.c +++ b/mm/util.c @@ -647,6 +647,56 @@ void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) } EXPORT_SYMBOL(kvrealloc); +/** + * __vmalloc_array - allocate memory for a virtually contiguous array. + * @n: number of elements. + * @size: element size. + * @flags: the type of memory to allocate (see kmalloc). + */ +void *__vmalloc_array(size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + return __vmalloc(bytes, flags); +} +EXPORT_SYMBOL(__vmalloc_array); + +/** + * vmalloc_array - allocate memory for a virtually contiguous array. + * @n: number of elements. + * @size: element size. + */ +void *vmalloc_array(size_t n, size_t size) +{ + return __vmalloc_array(n, size, GFP_KERNEL); +} +EXPORT_SYMBOL(vmalloc_array); + +/** + * __vcalloc - allocate and zero memory for a virtually contiguous array. 
+ * @n: number of elements. + * @size: element size. + * @flags: the type of memory to allocate (see kmalloc). + */ +void *__vcalloc(size_t n, size_t size, gfp_t flags) +{ + return __vmalloc_array(n, size, flags | __GFP_ZERO); +} +EXPORT_SYMBOL(__vcalloc); + +/** + * vcalloc - allocate and zero memory for a virtually contiguous array. + * @n: number of elements. + * @size: element size. + */ +void *vcalloc(size_t n, size_t size) +{ + return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO); +} +EXPORT_SYMBOL(vcalloc); + /* Neutral page->mapping pointer to address_space or anon_vma or other */ void *page_rmapping(struct page *page) { -- cgit v1.2.3-71-gd317 From a99a3e2efaf1f4454eb5c9176f47e66de075b134 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 31 Jan 2022 11:50:46 -0600 Subject: coredump: Move definition of struct coredump_params into coredump.h Move the definition of struct coredump_params into coredump.h where it belongs. Remove the slightly erroneous comment explaining why struct coredump_params was declared in binfmts.h. Signed-off-by: "Eric W. Biederman" --- fs/binfmt_flat.c | 1 + include/linux/binfmts.h | 13 +------------ include/linux/coredump.h | 12 +++++++++++- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 5f0bf24bb3b8..208cdce16de1 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 5d651c219c99..3dc20c4f394c 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -8,6 +8,7 @@ #include struct filename; +struct coredump_params; #define CORENAME_MAX_SIZE 128 @@ -77,18 +78,6 @@ struct linux_binprm { #define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3 #define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT) -/* Function parameter for binfmt->coredump */ -struct coredump_params { - const kernel_siginfo_t *siginfo; - struct pt_regs *regs; - struct file *file; - unsigned long limit; - unsigned long mm_flags; - loff_t written; - loff_t pos; - loff_t to_skip; -}; - /* * This structure defines the functions that are used to load the binary formats that * linux accepts. diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 248a68c668b4..2ee1460a1d66 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -14,11 +14,21 @@ struct core_vma_metadata { unsigned long dump_size; }; +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct pt_regs *regs; + struct file *file; + unsigned long limit; + unsigned long mm_flags; + loff_t written; + loff_t pos; + loff_t to_skip; +}; + /* * These are the only things you should do on a core-file: use only these * functions to write out all the necessary info. */ -struct coredump_params; extern void dump_skip_to(struct coredump_params *cprm, unsigned long to); extern void dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); -- cgit v1.2.3-71-gd317 From 95c5436a4883841588dae86fb0b9325f47ba5ad3 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 8 Mar 2022 12:55:29 -0600 Subject: coredump: Snapshot the vmas in do_coredump Move the call of dump_vma_snapshot and kvfree(vma_meta) out of the individual coredump routines into do_coredump itself. This makes the code less error prone and easier to maintain.
Make the vma snapshot available to the coredump routines in struct coredump_params. This makes it easier to change and update what is captured in the vma snapshot and will be needed for fixing fill_files_note. Reviewed-by: Jann Horn Reviewed-by: Kees Cook Signed-off-by: "Eric W. Biederman" --- fs/binfmt_elf.c | 20 +++++++++------------- fs/binfmt_elf_fdpic.c | 18 ++++++------------ fs/coredump.c | 41 +++++++++++++++++++++++------------------ include/linux/coredump.h | 6 +++--- 4 files changed, 39 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index eaf39b1bdbbb..5710467cf4b6 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2191,8 +2191,7 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, static int elf_core_dump(struct coredump_params *cprm) { int has_dumped = 0; - int vma_count, segs, i; - size_t vma_data_size; + int segs, i; struct elfhdr elf; loff_t offset = 0, dataoff; struct elf_note_info info = { }; @@ -2200,16 +2199,12 @@ static int elf_core_dump(struct coredump_params *cprm) struct elf_shdr *shdr4extnum = NULL; Elf_Half e_phnum; elf_addr_t e_shoff; - struct core_vma_metadata *vma_meta; - - if (dump_vma_snapshot(cprm, &vma_count, &vma_meta, &vma_data_size)) - return 0; /* * The number of segs are recored into ELF header as 16bit value. * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here. */ - segs = vma_count + elf_core_extra_phdrs(); + segs = cprm->vma_count + elf_core_extra_phdrs(); /* for notes section */ segs++; @@ -2248,7 +2243,7 @@ static int elf_core_dump(struct coredump_params *cprm) dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); - offset += vma_data_size; + offset += cprm->vma_data_size; offset += elf_core_extra_data_size(); e_shoff = offset; @@ -2268,8 +2263,8 @@ static int elf_core_dump(struct coredump_params *cprm) goto end_coredump; /* Write program headers for segments dump */ - for (i = 0; i < vma_count; i++) { - struct core_vma_metadata *meta = vma_meta + i; + for (i = 0; i < cprm->vma_count; i++) { + struct core_vma_metadata *meta = cprm->vma_meta + i; struct elf_phdr phdr; phdr.p_type = PT_LOAD; @@ -2306,8 +2301,8 @@ static int elf_core_dump(struct coredump_params *cprm) /* Align to page */ dump_skip_to(cprm, dataoff); - for (i = 0; i < vma_count; i++) { - struct core_vma_metadata *meta = vma_meta + i; + for (i = 0; i < cprm->vma_count; i++) { + struct core_vma_metadata *meta = cprm->vma_meta + i; if (!dump_user_range(cprm, meta->start, meta->dump_size)) goto end_coredump; @@ -2324,7 +2319,6 @@ static int elf_core_dump(struct coredump_params *cprm) end_coredump: free_note_info(&info); kfree(shdr4extnum); - kvfree(vma_meta); kfree(phdr4note); return has_dumped; } diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 7fa6e6632d9d..08d0c8797828 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1465,7 +1465,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm, static int elf_fdpic_core_dump(struct coredump_params *cprm) { int has_dumped = 0; - int vma_count, segs; + int segs; int i; struct elfhdr *elf = NULL; loff_t offset = 0, dataoff; @@ -1480,8 +1480,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) elf_addr_t e_shoff; struct core_thread *ct; struct elf_thread_status *tmp; - struct core_vma_metadata *vma_meta = NULL; - size_t vma_data_size; /* alloc memory for large data structures: too large to be on stack */ elf = kmalloc(sizeof(*elf), GFP_KERNEL); @@ -1491,9 +1489,6 @@ static int
elf_fdpic_core_dump(struct coredump_params *cprm) if (!psinfo) goto end_coredump; - if (dump_vma_snapshot(cprm, &vma_count, &vma_meta, &vma_data_size)) - goto end_coredump; - for (ct = current->signal->core_state->dumper.next; ct; ct = ct->next) { tmp = elf_dump_thread_status(cprm->siginfo->si_signo, @@ -1513,7 +1508,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) tmp->next = thread_list; thread_list = tmp; - segs = vma_count + elf_core_extra_phdrs(); + segs = cprm->vma_count + elf_core_extra_phdrs(); /* for notes section */ segs++; @@ -1558,7 +1553,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) /* Page-align dumped data */ dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); - offset += vma_data_size; + offset += cprm->vma_data_size; offset += elf_core_extra_data_size(); e_shoff = offset; @@ -1578,8 +1573,8 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) goto end_coredump; /* write program headers for segments dump */ - for (i = 0; i < vma_count; i++) { - struct core_vma_metadata *meta = vma_meta + i; + for (i = 0; i < cprm->vma_count; i++) { + struct core_vma_metadata *meta = cprm->vma_meta + i; struct elf_phdr phdr; size_t sz; @@ -1628,7 +1623,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) dump_skip_to(cprm, dataoff); - if (!elf_fdpic_dump_segments(cprm, vma_meta, vma_count)) + if (!elf_fdpic_dump_segments(cprm, cprm->vma_meta, cprm->vma_count)) goto end_coredump; if (!elf_core_write_extra_data(cprm)) @@ -1652,7 +1647,6 @@ end_coredump: thread_list = thread_list->next; kfree(tmp); } - kvfree(vma_meta); kfree(phdr4note); kfree(elf); kfree(psinfo); diff --git a/fs/coredump.c b/fs/coredump.c index b73817712dd2..88ac67994d03 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -54,6 +54,8 @@ #include +static bool dump_vma_snapshot(struct coredump_params *cprm); + static int core_uses_pid; static unsigned int core_pipe_limit; static char core_pattern[CORENAME_MAX_SIZE] = "core"; @@ -532,6 +534,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) * by any locks. */ .mm_flags = mm->flags, + .vma_meta = NULL, }; audit_core_dumps(siginfo->si_signo); @@ -746,6 +749,9 @@ void do_coredump(const kernel_siginfo_t *siginfo) pr_info("Core dump to |%s disabled\n", cn.corename); goto close_fail; } + if (!dump_vma_snapshot(&cprm)) + goto close_fail; + file_start_write(cprm.file); core_dumped = binfmt->core_dump(&cprm); /* @@ -759,6 +765,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) dump_emit(&cprm, "", 1); } file_end_write(cprm.file); + kvfree(cprm.vma_meta); } if (ispipe && core_pipe_limit) wait_for_dump_helpers(cprm.file); @@ -1096,14 +1103,11 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, * Under the mmap_lock, take a snapshot of relevant information about the task's * VMAs. */ -int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, - struct core_vma_metadata **vma_meta, - size_t *vma_data_size_ptr) +static bool dump_vma_snapshot(struct coredump_params *cprm) { struct vm_area_struct *vma, *gate_vma; struct mm_struct *mm = current->mm; int i; - size_t vma_data_size = 0; /* * Once the stack expansion code is fixed to not change VMA bounds @@ -1111,20 +1115,21 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, * mmap_lock in read mode. */ if (mmap_write_lock_killable(mm)) - return -EINTR; + return false; + cprm->vma_data_size = 0; gate_vma = get_gate_vma(mm); - *vma_count = mm->map_count + (gate_vma ? 1 : 0); + cprm->vma_count = mm->map_count + (gate_vma ? 
1 : 0); - *vma_meta = kvmalloc_array(*vma_count, sizeof(**vma_meta), GFP_KERNEL); - if (!*vma_meta) { + cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL); + if (!cprm->vma_meta) { mmap_write_unlock(mm); - return -ENOMEM; + return false; } for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; vma = next_vma(vma, gate_vma), i++) { - struct core_vma_metadata *m = (*vma_meta) + i; + struct core_vma_metadata *m = cprm->vma_meta + i; m->start = vma->vm_start; m->end = vma->vm_end; @@ -1134,13 +1139,14 @@ static int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, mmap_write_unlock(mm); - if (WARN_ON(i != *vma_count)) { - kvfree(*vma_meta); - return -EFAULT; + if (WARN_ON(i != cprm->vma_count)) { + kvfree(cprm->vma_meta); + return false; } - for (i = 0; i < *vma_count; i++) { - struct core_vma_metadata *m = (*vma_meta) + i; + + for (i = 0; i < cprm->vma_count; i++) { + struct core_vma_metadata *m = cprm->vma_meta + i; if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) { char elfmag[SELFMAG]; @@ -1153,9 +1159,8 @@ static int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, } } - vma_data_size += m->dump_size; + cprm->vma_data_size += m->dump_size; } - *vma_data_size_ptr = vma_data_size; - return 0; + return true; } diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 2ee1460a1d66..7d05370e555e 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -23,6 +23,9 @@ struct coredump_params { loff_t written; loff_t pos; loff_t to_skip; + int vma_count; + size_t vma_data_size; + struct core_vma_metadata *vma_meta; }; /* @@ -35,9 +38,6 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); int dump_user_range(struct coredump_params *cprm, unsigned long start, unsigned long len); -int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, - struct core_vma_metadata **vma_meta, - size_t *vma_data_size_ptr); extern void do_coredump(const kernel_siginfo_t *siginfo); #else static inline void do_coredump(const kernel_siginfo_t *siginfo) {} -- cgit v1.2.3-71-gd317 From a759de6991b35ad437adba32b5f0cb2fd9e75929 Mon Sep 17 00:00:00 2001 From: Youngjin Jang Date: Tue, 8 Mar 2022 04:07:39 +0900 Subject: PM: sleep: Add device name to suspend_report_result() Currently, suspend_report_result() prints only function information. If any driver uses a common PM function, nobody knows who exactly called the failing function. A device pointer is needed to recognize the failing device. For example: PM: dpm_run_callback(): pnp_bus_suspend+0x0/0x10 returns 0 PM: dpm_run_callback(): pci_pm_suspend+0x0/0x150 returns 0 become after the change: serial 00:05: PM: dpm_run_callback(): pnp_bus_suspend+0x0/0x10 returns 0 pci 0000:00:01.3: PM: dpm_run_callback(): pci_pm_suspend+0x0/0x150 returns 0 Signed-off-by: Youngjin Jang [ rjw: Changelog edits ] Signed-off-by: Rafael J.
Wysocki --- drivers/base/power/main.c | 10 +++++----- drivers/pci/pci-driver.c | 14 +++++++------- drivers/pnp/driver.c | 2 +- drivers/usb/core/hcd-pci.c | 4 ++-- include/linux/pm.h | 8 ++++---- 5 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 04ea92cbd9cf..41e17b8c2c20 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -485,7 +485,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev, trace_device_pm_callback_start(dev, info, state.event); error = cb(dev); trace_device_pm_callback_end(dev, error); - suspend_report_result(cb, error); + suspend_report_result(dev, cb, error); initcall_debug_report(dev, calltime, cb, error); @@ -1568,7 +1568,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state, trace_device_pm_callback_start(dev, info, state.event); error = cb(dev, state); trace_device_pm_callback_end(dev, error); - suspend_report_result(cb, error); + suspend_report_result(dev, cb, error); initcall_debug_report(dev, calltime, cb, error); @@ -1855,7 +1855,7 @@ unlock: device_unlock(dev); if (ret < 0) { - suspend_report_result(callback, ret); + suspend_report_result(dev, callback, ret); pm_runtime_put(dev); return ret; } @@ -1960,10 +1960,10 @@ int dpm_suspend_start(pm_message_t state) } EXPORT_SYMBOL_GPL(dpm_suspend_start); -void __suspend_report_result(const char *function, void *fn, int ret) +void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret) { if (ret) - pr_err("%s(): %pS returns %d\n", function, fn, ret); + dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret); } EXPORT_SYMBOL_GPL(__suspend_report_result); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 588588cfda48..415f7664b010 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -596,7 +596,7 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state) int error; error = drv->suspend(pci_dev, state); - suspend_report_result(drv->suspend, error); + suspend_report_result(dev, drv->suspend, error); if (error) return error; @@ -775,7 +775,7 @@ static int pci_pm_suspend(struct device *dev) int error; error = pm->suspend(dev); - suspend_report_result(pm->suspend, error); + suspend_report_result(dev, pm->suspend, error); if (error) return error; @@ -821,7 +821,7 @@ static int pci_pm_suspend_noirq(struct device *dev) int error; error = pm->suspend_noirq(dev); - suspend_report_result(pm->suspend_noirq, error); + suspend_report_result(dev, pm->suspend_noirq, error); if (error) return error; @@ -1010,7 +1010,7 @@ static int pci_pm_freeze(struct device *dev) int error; error = pm->freeze(dev); - suspend_report_result(pm->freeze, error); + suspend_report_result(dev, pm->freeze, error); if (error) return error; } @@ -1030,7 +1030,7 @@ static int pci_pm_freeze_noirq(struct device *dev) int error; error = pm->freeze_noirq(dev); - suspend_report_result(pm->freeze_noirq, error); + suspend_report_result(dev, pm->freeze_noirq, error); if (error) return error; } @@ -1116,7 +1116,7 @@ static int pci_pm_poweroff(struct device *dev) int error; error = pm->poweroff(dev); - suspend_report_result(pm->poweroff, error); + suspend_report_result(dev, pm->poweroff, error); if (error) return error; } @@ -1154,7 +1154,7 @@ static int pci_pm_poweroff_noirq(struct device *dev) int error; error = pm->poweroff_noirq(dev); - suspend_report_result(pm->poweroff_noirq, error); + suspend_report_result(dev, pm->poweroff_noirq, 
error); if (error) return error; } diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index cc6757dfa3f1..c02e7bf643a6 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c @@ -171,7 +171,7 @@ static int __pnp_bus_suspend(struct device *dev, pm_message_t state) if (pnp_drv->driver.pm && pnp_drv->driver.pm->suspend) { error = pnp_drv->driver.pm->suspend(dev); - suspend_report_result(pnp_drv->driver.pm->suspend, error); + suspend_report_result(dev, pnp_drv->driver.pm->suspend, error); if (error) return error; } diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index d630cccd2e6e..dd44e37a454a 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -446,7 +446,7 @@ static int suspend_common(struct device *dev, bool do_wakeup) HCD_WAKEUP_PENDING(hcd->shared_hcd)) return -EBUSY; retval = hcd->driver->pci_suspend(hcd, do_wakeup); - suspend_report_result(hcd->driver->pci_suspend, retval); + suspend_report_result(dev, hcd->driver->pci_suspend, retval); /* Check again in case wakeup raced with pci_suspend */ if ((retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) || @@ -556,7 +556,7 @@ static int hcd_pci_suspend_noirq(struct device *dev) dev_dbg(dev, "--> PCI %s\n", pci_power_name(pci_dev->current_state)); } else { - suspend_report_result(pci_prepare_to_sleep, retval); + suspend_report_result(dev, pci_prepare_to_sleep, retval); return retval; } diff --git a/include/linux/pm.h b/include/linux/pm.h index f7d2be686359..e65b3ab28377 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -770,11 +770,11 @@ extern int dpm_suspend_late(pm_message_t state); extern int dpm_suspend(pm_message_t state); extern int dpm_prepare(pm_message_t state); -extern void __suspend_report_result(const char *function, void *fn, int ret); +extern void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret); -#define suspend_report_result(fn, ret) \ +#define suspend_report_result(dev, fn, ret) \ do { \ - __suspend_report_result(__func__, fn, ret); \ + __suspend_report_result(__func__, dev, fn, ret); \ } while (0) extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); @@ -814,7 +814,7 @@ static inline int dpm_suspend_start(pm_message_t state) return 0; } -#define suspend_report_result(fn, ret) do {} while (0) +#define suspend_report_result(dev, fn, ret) do {} while (0) static inline int device_pm_wait_for_dev(struct device *a, struct device *b) { -- cgit v1.2.3-71-gd317 From 390031c942116d4733310f0684beb8db19885fe6 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 8 Mar 2022 13:04:19 -0600 Subject: coredump: Use the vma snapshot in fill_files_note Matthew Wilcox reported that there is a missing mmap_lock in fill_files_note that could possibly lead to a use-after-free. Solve this by using the existing vma snapshot for consistency and to avoid the need to take the mmap_lock anywhere in the coredump code except for dump_vma_snapshot. Update the dump_vma_snapshot to capture vm_pgoff and vm_file that are needed by fill_files_note. Add free_vma_snapshot to free the captured values of vm_file. Reported-by: Matthew Wilcox Link: https://lkml.kernel.org/r/20220131153740.2396974-1-willy@infradead.org Cc: stable@vger.kernel.org Fixes: a07279c9a8cd ("binfmt_elf, binfmt_elf_fdpic: use a VMA list snapshot") Fixes: 2aa362c49c31 ("coredump: extend core dump note section to contain file names of mapped files") Reviewed-by: Kees Cook Signed-off-by: "Eric W.
Biederman" --- fs/binfmt_elf.c | 24 ++++++++++++------------ fs/coredump.c | 22 +++++++++++++++++++++- include/linux/coredump.h | 2 ++ 3 files changed, 35 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7f0c391832cf..ca5296cae979 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1641,17 +1641,16 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata, * long file_ofs * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... */ -static int fill_files_note(struct memelfnote *note) +static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; unsigned count, size, names_ofs, remaining, n; user_long_t *data; user_long_t *start_end_ofs; char *name_base, *name_curpos; + int i; /* *Estimated* file count and total data size needed */ - count = mm->map_count; + count = cprm->vma_count; if (count > UINT_MAX / 64) return -EINVAL; size = count * 64; @@ -1673,11 +1672,12 @@ static int fill_files_note(struct memelfnote *note) name_base = name_curpos = ((char *)data) + names_ofs; remaining = size - names_ofs; count = 0; - for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { + for (i = 0; i < cprm->vma_count; i++) { + struct core_vma_metadata *m = &cprm->vma_meta[i]; struct file *file; const char *filename; - file = vma->vm_file; + file = m->file; if (!file) continue; filename = file_path(file, name_curpos, remaining); @@ -1697,9 +1697,9 @@ static int fill_files_note(struct memelfnote *note) memmove(name_curpos, filename, n); name_curpos += n; - *start_end_ofs++ = vma->vm_start; - *start_end_ofs++ = vma->vm_end; - *start_end_ofs++ = vma->vm_pgoff; + *start_end_ofs++ = m->start; + *start_end_ofs++ = m->end; + *start_end_ofs++ = m->pgoff; count++; } @@ -1710,7 +1710,7 @@ static int fill_files_note(struct memelfnote *note) * Count usually is less than mm->map_count, * we need to move filenames down. 
*/ - n = mm->map_count - count; + n = cprm->vma_count - count; if (n != 0) { unsigned shift_bytes = n * 3 * sizeof(data[0]); memmove(name_base - shift_bytes, name_base, @@ -1909,7 +1909,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, fill_auxv_note(&info->auxv, current->mm); info->size += notesize(&info->auxv); - if (fill_files_note(&info->files) == 0) + if (fill_files_note(&info->files, cprm) == 0) info->size += notesize(&info->files); return 1; @@ -2098,7 +2098,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, fill_auxv_note(info->notes + 3, current->mm); info->numnote = 4; - if (fill_files_note(info->notes + info->numnote) == 0) { + if (fill_files_note(info->notes + info->numnote, cprm) == 0) { info->notes_files = info->notes + info->numnote; info->numnote++; } diff --git a/fs/coredump.c b/fs/coredump.c index 7f100a637264..7ed7d601e5e0 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -55,6 +55,7 @@ #include static bool dump_vma_snapshot(struct coredump_params *cprm); +static void free_vma_snapshot(struct coredump_params *cprm); static int core_uses_pid; static unsigned int core_pipe_limit; @@ -765,7 +766,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) dump_emit(&cprm, "", 1); } file_end_write(cprm.file); - kvfree(cprm.vma_meta); + free_vma_snapshot(&cprm); } if (ispipe && core_pipe_limit) wait_for_dump_helpers(cprm.file); @@ -1099,6 +1100,20 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, return gate_vma; } +static void free_vma_snapshot(struct coredump_params *cprm) +{ + if (cprm->vma_meta) { + int i; + for (i = 0; i < cprm->vma_count; i++) { + struct file *file = cprm->vma_meta[i].file; + if (file) + fput(file); + } + kvfree(cprm->vma_meta); + cprm->vma_meta = NULL; + } +} + /* * Under the mmap_lock, take a snapshot of relevant information about the task's * VMAs. @@ -1135,6 +1150,11 @@ static bool dump_vma_snapshot(struct coredump_params *cprm) m->end = vma->vm_end; m->flags = vma->vm_flags; m->dump_size = vma_dump_size(vma, cprm->mm_flags); + m->pgoff = vma->vm_pgoff; + + m->file = vma->vm_file; + if (m->file) + get_file(m->file); } mmap_write_unlock(mm); diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 7d05370e555e..08a1d3e7e46d 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -12,6 +12,8 @@ struct core_vma_metadata { unsigned long start, end; unsigned long flags; unsigned long dump_size; + unsigned long pgoff; + struct file *file; }; struct coredump_params { -- cgit v1.2.3-71-gd317 From c57bef0287dd5deeabaea5727950559fb9037cd9 Mon Sep 17 00:00:00 2001 From: Barret Rhoden Date: Thu, 6 Jan 2022 12:20:40 -0500 Subject: prlimit: make do_prlimit() static There are no other callers in the kernel. Fixed up a comment format and whitespace issue when moving do_prlimit() higher in sys.c. Signed-off-by: Barret Rhoden Link: https://lkml.kernel.org/r/20220106172041.522167-3-brho@google.com Signed-off-by: Eric W. 
Biederman --- include/linux/resource.h | 2 - kernel/sys.c | 116 ++++++++++++++++++++++++----------------------- 2 files changed, 59 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/include/linux/resource.h b/include/linux/resource.h index bdf491cbcab7..4fdbc0c3f315 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h @@ -8,7 +8,5 @@ struct task_struct; void getrusage(struct task_struct *p, int who, struct rusage *ru); -int do_prlimit(struct task_struct *tsk, unsigned int resource, - struct rlimit *new_rlim, struct rlimit *old_rlim); #endif diff --git a/kernel/sys.c b/kernel/sys.c index 5b0e172c4d47..d37d1f882b7d 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1424,6 +1424,65 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) return errno; } +/* make sure you are allowed to change @tsk limits before calling this */ +static int do_prlimit(struct task_struct *tsk, unsigned int resource, + struct rlimit *new_rlim, struct rlimit *old_rlim) +{ + struct rlimit *rlim; + int retval = 0; + + if (resource >= RLIM_NLIMITS) + return -EINVAL; + if (new_rlim) { + if (new_rlim->rlim_cur > new_rlim->rlim_max) + return -EINVAL; + if (resource == RLIMIT_NOFILE && + new_rlim->rlim_max > sysctl_nr_open) + return -EPERM; + } + + /* protect tsk->signal and tsk->sighand from disappearing */ + read_lock(&tasklist_lock); + if (!tsk->sighand) { + retval = -ESRCH; + goto out; + } + + rlim = tsk->signal->rlim + resource; + task_lock(tsk->group_leader); + if (new_rlim) { + /* + * Keep the capable check against init_user_ns until cgroups can + * contain all limits. + */ + if (new_rlim->rlim_max > rlim->rlim_max && + !capable(CAP_SYS_RESOURCE)) + retval = -EPERM; + if (!retval) + retval = security_task_setrlimit(tsk, resource, new_rlim); + } + if (!retval) { + if (old_rlim) + *old_rlim = *rlim; + if (new_rlim) + *rlim = *new_rlim; + } + task_unlock(tsk->group_leader); + + /* + * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not + * infinite. In case of RLIM_INFINITY the posix CPU timer code + * ignores the rlimit. 
+ */ + if (!retval && new_rlim && resource == RLIMIT_CPU && + new_rlim->rlim_cur != RLIM_INFINITY && + IS_ENABLED(CONFIG_POSIX_TIMERS)) + update_rlimit_cpu(tsk, new_rlim->rlim_cur); +out: + read_unlock(&tasklist_lock); + return retval; +} + SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) { struct rlimit value; @@ -1567,63 +1626,6 @@ static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim) rlim->rlim_max = (unsigned long)rlim64->rlim_max; } -/* make sure you are allowed to change @tsk limits before calling this */ -int do_prlimit(struct task_struct *tsk, unsigned int resource, - struct rlimit *new_rlim, struct rlimit *old_rlim) -{ - struct rlimit *rlim; - int retval = 0; - - if (resource >= RLIM_NLIMITS) - return -EINVAL; - if (new_rlim) { - if (new_rlim->rlim_cur > new_rlim->rlim_max) - return -EINVAL; - if (resource == RLIMIT_NOFILE && - new_rlim->rlim_max > sysctl_nr_open) - return -EPERM; - } - - /* protect tsk->signal and tsk->sighand from disappearing */ - read_lock(&tasklist_lock); - if (!tsk->sighand) { - retval = -ESRCH; - goto out; - } - - rlim = tsk->signal->rlim + resource; - task_lock(tsk->group_leader); - if (new_rlim) { - /* Keep the capable check against init_user_ns until - cgroups can contain all limits */ - if (new_rlim->rlim_max > rlim->rlim_max && - !capable(CAP_SYS_RESOURCE)) - retval = -EPERM; - if (!retval) - retval = security_task_setrlimit(tsk, resource, new_rlim); - } - if (!retval) { - if (old_rlim) - *old_rlim = *rlim; - if (new_rlim) - *rlim = *new_rlim; - } - task_unlock(tsk->group_leader); - - /* - * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not - * infinite. In case of RLIM_INFINITY the posix CPU timer code - * ignores the rlimit. - */ - if (!retval && new_rlim && resource == RLIMIT_CPU && - new_rlim->rlim_cur != RLIM_INFINITY && - IS_ENABLED(CONFIG_POSIX_TIMERS)) - update_rlimit_cpu(tsk, new_rlim->rlim_cur); -out: - read_unlock(&tasklist_lock); - return retval; -} - /* rcu lock must be held */ static int check_prlimit_permission(struct task_struct *task, unsigned int flags) -- cgit v1.2.3-71-gd317 From 18c91bb2d87268d23868bf13508f5bc9cf04e89a Mon Sep 17 00:00:00 2001 From: Barret Rhoden Date: Thu, 6 Jan 2022 12:20:41 -0500 Subject: prlimit: do not grab the tasklist_lock Unnecessarily grabbing the tasklist_lock can be a scalability bottleneck for workloads that also must grab the tasklist_lock for waiting, killing, and cloning. The tasklist_lock was grabbed to protect tsk->sighand from disappearing (becoming NULL). tsk->signal was already protected by holding a reference to tsk. update_rlimit_cpu() assumed tsk->sighand != NULL. With this commit, it attempts to lock_task_sighand(). However, this means that update_rlimit_cpu() can fail. This only happens when a task is exiting. Note that during exec, sighand may *change*, but it will not be NULL. Prior to this commit, the do_prlimit() ensured that update_rlimit_cpu() would not fail by read locking the tasklist_lock and checking tsk->sighand != NULL. If update_rlimit_cpu() fails, there may be other tasks that are not exiting that share tsk->signal. However, the group_leader is the last task to be released, so if we cannot update_rlimit_cpu(group_leader), then the entire process is exiting. The only other caller of update_rlimit_cpu() is selinux_bprm_committing_creds(). It has tsk == current, so update_rlimit_cpu() cannot fail (current->sighand cannot disappear until current exits). 
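In isolation, the resulting pattern looks like the sketch below (illustrative only; lock_task_sighand() and unlock_task_sighand() are the real helpers from <linux/sched/signal.h>, while the surrounding function is made up):

#include <linux/errno.h>
#include <linux/sched/signal.h>

/* Touch sighand-protected state without holding the tasklist_lock.
 * lock_task_sighand() takes the siglock under RCU and fails only when
 * @tsk is already exiting and its sighand has been released. */
static int example_touch_sighand(struct task_struct *tsk)
{
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return -ESRCH;	/* task is exiting */

	/* ... siglock held: CPU timers, rlimits, etc. may be updated ... */

	unlock_task_sighand(tsk, &flags);
	return 0;
}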
This change resulted in a 14% speedup on a microbenchmark where parents kill and wait on their children, and children getpriority, setpriority, and getrlimit. Signed-off-by: Barret Rhoden Link: https://lkml.kernel.org/r/20220106172041.522167-4-brho@google.com Signed-off-by: Eric W. Biederman --- include/linux/posix-timers.h | 2 +- kernel/sys.c | 25 ++++++++++++++----------- kernel/time/posix-cpu-timers.c | 12 +++++++++--- 3 files changed, 24 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 5bbcd280bfd2..9cf126c3b27f 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -253,7 +253,7 @@ void posix_cpu_timers_exit_group(struct task_struct *task); void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, u64 *newval, u64 *oldval); -void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); +int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); void posixtimer_rearm(struct kernel_siginfo *info); #endif diff --git a/kernel/sys.c b/kernel/sys.c index d37d1f882b7d..374f83e95239 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1441,13 +1441,7 @@ static int do_prlimit(struct task_struct *tsk, unsigned int resource, return -EPERM; } - /* protect tsk->signal and tsk->sighand from disappearing */ - read_lock(&tasklist_lock); - if (!tsk->sighand) { - retval = -ESRCH; - goto out; - } - + /* Holding a refcount on tsk protects tsk->signal from disappearing. */ rlim = tsk->signal->rlim + resource; task_lock(tsk->group_leader); if (new_rlim) { @@ -1476,10 +1470,19 @@ static int do_prlimit(struct task_struct *tsk, unsigned int resource, */ if (!retval && new_rlim && resource == RLIMIT_CPU && new_rlim->rlim_cur != RLIM_INFINITY && - IS_ENABLED(CONFIG_POSIX_TIMERS)) - update_rlimit_cpu(tsk, new_rlim->rlim_cur); -out: - read_unlock(&tasklist_lock); + IS_ENABLED(CONFIG_POSIX_TIMERS)) { + /* + * update_rlimit_cpu can fail if the task is exiting, but there + * may be other tasks in the thread group that are not exiting, + * and they need their cpu timers adjusted. + * + * The group_leader is the last task to be released, so if we + * cannot update_rlimit_cpu on it, then the entire process is + * exiting and we do not need to update at all. + */ + update_rlimit_cpu(tsk->group_leader, new_rlim->rlim_cur); + } + return retval; } diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 96b4e7810426..e13e628509fb 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -34,14 +34,20 @@ void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit) * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if * necessary. Needs siglock protection since other code may update the * expiration cache as well. + * + * Returns 0 on success, -ESRCH on failure. Can fail if the task is exiting and + * we cannot lock_task_sighand. Cannot fail if task is current. 
*/ -void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new) +int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new) { u64 nsecs = rlim_new * NSEC_PER_SEC; + unsigned long irq_fl; - spin_lock_irq(&task->sighand->siglock); + if (!lock_task_sighand(task, &irq_fl)) + return -ESRCH; set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL); - spin_unlock_irq(&task->sighand->siglock); + unlock_task_sighand(task, &irq_fl); + return 0; } /* -- cgit v1.2.3-71-gd317 From 41d36a9f3e5336f5b48c3adba0777b8e217020d7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 8 Mar 2022 07:05:28 +0100 Subject: fs: remove kiocb.ki_hint This field is entirely unused now except for a tracepoint in f2fs, so remove it. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20220308060529.736277-2-hch@lst.de Signed-off-by: Jens Axboe --- fs/aio.c | 1 - fs/cachefiles/io.c | 2 -- fs/f2fs/file.c | 6 ------ fs/io_uring.c | 1 - include/linux/fs.h | 12 ------------ include/trace/events/f2fs.h | 3 +-- 6 files changed, 1 insertion(+), 24 deletions(-) (limited to 'include/linux') diff --git a/fs/aio.c b/fs/aio.c index 4ceba13a7db0..eb0948bb74f1 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1478,7 +1478,6 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) req->ki_flags = iocb_flags(req->ki_filp); if (iocb->aio_flags & IOCB_FLAG_RESFD) req->ki_flags |= IOCB_EVENTFD; - req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp)); if (iocb->aio_flags & IOCB_FLAG_IOPRIO) { /* * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index 753986ea1583..bc7c7a7d9260 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -138,7 +138,6 @@ static int cachefiles_read(struct netfs_cache_resources *cres, ki->iocb.ki_filp = file; ki->iocb.ki_pos = start_pos + skipped; ki->iocb.ki_flags = IOCB_DIRECT; - ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file)); ki->iocb.ki_ioprio = get_current_ioprio(); ki->skipped = skipped; ki->object = object; @@ -313,7 +312,6 @@ static int cachefiles_write(struct netfs_cache_resources *cres, ki->iocb.ki_filp = file; ki->iocb.ki_pos = start_pos; ki->iocb.ki_flags = IOCB_DIRECT | IOCB_WRITE; - ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file)); ki->iocb.ki_ioprio = get_current_ioprio(); ki->object = object; ki->inval_counter = cres->inval_counter; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 3c98ef6af97d..45076c01a2ba 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -4479,10 +4479,8 @@ static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from, struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); const bool do_opu = f2fs_lfs_mode(sbi); - const int whint_mode = F2FS_OPTION(sbi).whint_mode; const loff_t pos = iocb->ki_pos; const ssize_t count = iov_iter_count(from); - const enum rw_hint hint = iocb->ki_hint; unsigned int dio_flags; struct iomap_dio *dio; ssize_t ret; @@ -4515,8 +4513,6 @@ static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from, if (do_opu) down_read(&fi->i_gc_rwsem[READ]); } - if (whint_mode == WHINT_MODE_OFF) - iocb->ki_hint = WRITE_LIFE_NOT_SET; /* * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of @@ -4539,8 +4535,6 @@ static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from, ret = iomap_dio_complete(dio); } - if (whint_mode == WHINT_MODE_OFF) - iocb->ki_hint = hint; if (do_opu) 
up_read(&fi->i_gc_rwsem[READ]); up_read(&fi->i_gc_rwsem[WRITE]); diff --git a/fs/io_uring.c b/fs/io_uring.c index 4715980e9015..36e09169ff93 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3747,7 +3747,6 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; - req->rw.kiocb.ki_hint = ki_hint_validate(file_write_hint(req->file)); return io_prep_rw(req, sqe); } diff --git a/include/linux/fs.h b/include/linux/fs.h index e2d892b201b0..d5658ac5d8c6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -327,7 +327,6 @@ struct kiocb { void (*ki_complete)(struct kiocb *iocb, long ret); void *private; int ki_flags; - u16 ki_hint; u16 ki_ioprio; /* See linux/ioprio.h */ struct wait_page_queue *ki_waitq; /* for async buffered IO */ randomized_struct_fields_end @@ -2225,21 +2224,11 @@ static inline enum rw_hint file_write_hint(struct file *file) static inline int iocb_flags(struct file *file); -static inline u16 ki_hint_validate(enum rw_hint hint) -{ - typeof(((struct kiocb *)0)->ki_hint) max_hint = -1; - - if (hint <= max_hint) - return hint; - return 0; -} - static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) { *kiocb = (struct kiocb) { .ki_filp = filp, .ki_flags = iocb_flags(filp), - .ki_hint = ki_hint_validate(file_write_hint(filp)), .ki_ioprio = get_current_ioprio(), }; } @@ -2250,7 +2239,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, *kiocb = (struct kiocb) { .ki_filp = filp, .ki_flags = kiocb_src->ki_flags, - .ki_hint = kiocb_src->ki_hint, .ki_ioprio = kiocb_src->ki_ioprio, .ki_pos = kiocb_src->ki_pos, }; diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index f701bb23f83c..1779e133cea0 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -956,12 +956,11 @@ TRACE_EVENT(f2fs_direct_IO_enter, __entry->rw = rw; ), - TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_hint = %x ki_ioprio = %x rw = %d", + TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d", show_dev_ino(__entry), __entry->iocb->ki_pos, __entry->len, __entry->iocb->ki_flags, - __entry->iocb->ki_hint, __entry->iocb->ki_ioprio, __entry->rw) ); -- cgit v1.2.3-71-gd317 From 7b12e49669c99f63bc12351c57e581f1f14d4adf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 8 Mar 2022 07:05:29 +0100 Subject: fs: remove fs.f_write_hint The value is now completely unused except for reporting it back through the F_GET_FILE_RW_HINT fcntl, so remove the value and the two fcntls for it. Trying to use the F_SET_FILE_RW_HINT and F_GET_FILE_RW_HINT fcntls will now return EINVAL, just like it would on a kernel that never supported this functionality in the first place.
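For context, the surviving per-inode interface looks like this from userspace. This is an illustrative sketch assuming a glibc new enough to expose F_SET_RW_HINT and the RWH_* constants under _GNU_SOURCE (older systems can pull them from <linux/fcntl.h>); the function name is made up for the example:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>

static int example_set_write_hint(int fd)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;

	/* Per-inode hint: unlike the removed F_SET_FILE_RW_HINT, this is
	 * visible through every open file description of the inode. */
	return fcntl(fd, F_SET_RW_HINT, &hint);
}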
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20220308060529.736277-3-hch@lst.de Signed-off-by: Jens Axboe --- fs/fcntl.c | 18 ------------------ fs/open.c | 1 - include/linux/fs.h | 9 --------- 3 files changed, 28 deletions(-) (limited to 'include/linux') diff --git a/fs/fcntl.c b/fs/fcntl.c index 9c6c6a3e2de5..f15d885b9796 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -291,22 +291,6 @@ static long fcntl_rw_hint(struct file *file, unsigned int cmd, u64 h; switch (cmd) { - case F_GET_FILE_RW_HINT: - h = file_write_hint(file); - if (copy_to_user(argp, &h, sizeof(*argp))) - return -EFAULT; - return 0; - case F_SET_FILE_RW_HINT: - if (copy_from_user(&h, argp, sizeof(h))) - return -EFAULT; - hint = (enum rw_hint) h; - if (!rw_hint_valid(hint)) - return -EINVAL; - - spin_lock(&file->f_lock); - file->f_write_hint = hint; - spin_unlock(&file->f_lock); - return 0; case F_GET_RW_HINT: h = inode->i_write_hint; if (copy_to_user(argp, &h, sizeof(*argp))) @@ -431,8 +415,6 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, break; case F_GET_RW_HINT: case F_SET_RW_HINT: - case F_GET_FILE_RW_HINT: - case F_SET_FILE_RW_HINT: err = fcntl_rw_hint(filp, cmd, arg); break; default: diff --git a/fs/open.c b/fs/open.c index 9ff2f621b760..1315253e0247 100644 --- a/fs/open.c +++ b/fs/open.c @@ -835,7 +835,6 @@ static int do_dentry_open(struct file *f, likely(f->f_op->write || f->f_op->write_iter)) f->f_mode |= FMODE_CAN_WRITE; - f->f_write_hint = WRITE_LIFE_NOT_SET; f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); diff --git a/include/linux/fs.h b/include/linux/fs.h index d5658ac5d8c6..a1fc3b41cd82 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -966,7 +966,6 @@ struct file { * Must not be taken from IRQ context. */ spinlock_t f_lock; - enum rw_hint f_write_hint; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; @@ -2214,14 +2213,6 @@ static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns, !gid_valid(i_gid_into_mnt(mnt_userns, inode)); } -static inline enum rw_hint file_write_hint(struct file *file) -{ - if (file->f_write_hint != WRITE_LIFE_NOT_SET) - return file->f_write_hint; - - return file_inode(file)->i_write_hint; -} - static inline int iocb_flags(struct file *file); static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) -- cgit v1.2.3-71-gd317 From 4e5cc99e1e485954a9c09872e0eeea570fb2b5a5 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 8 Mar 2022 15:32:19 +0800 Subject: blk-mq: manage hctx map via xarray First, the code becomes cleaner by switching from a plain array to an xarray. Second, a use-after-free on q->queue_hw_ctx can be fixed, because queue_for_each_hw_ctx() may run while an update of nr_hw_queues is in progress. With this patch, q->hctx_table is defined as an xarray that shares its lifetime with the request queue, so queue_for_each_hw_ctx() can use q->hctx_table to look up hctx reliably.
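The xarray operations the conversion leans on, gathered into one illustrative sketch (the xa_* calls are the real <linux/xarray.h> API; the function and example_obj are made up):

#include <linux/gfp.h>
#include <linux/xarray.h>

static int example_obj;

static int example_table_usage(void)
{
	struct xarray table;
	unsigned long index;
	void *entry;
	int ret;

	xa_init(&table);

	/* xa_insert() refuses to overwrite: it returns -EBUSY if the slot
	 * is already occupied, which catches double insertion of an hctx. */
	ret = xa_insert(&table, 0, &example_obj, GFP_KERNEL);
	if (ret)
		return ret;

	entry = xa_load(&table, 0);	/* lockless lookup, NULL if absent */

	/* Iteration visits only present entries, so holes left behind by
	 * torn-down or failed hctxs are skipped naturally. */
	xa_for_each(&table, index, entry)
		;

	xa_erase(&table, 0);
	xa_destroy(&table);
	return 0;
}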
Reported-by: Yu Kuai Signed-off-by: Ming Lei Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220308073219.91173-7-ming.lei@redhat.com [axboe: fix blk_mq_hw_ctx forward declaration] Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.h | 2 ++ block/blk-mq-tag.c | 2 +- block/blk-mq.c | 62 +++++++++++++++++++------------------------------- block/blk-mq.h | 2 +- include/linux/blk-mq.h | 3 +-- include/linux/blkdev.h | 2 +- 6 files changed, 30 insertions(+), 43 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h index a68aa6041a10..69918f4170d6 100644 --- a/block/blk-mq-debugfs.h +++ b/block/blk-mq-debugfs.h @@ -6,6 +6,8 @@ #include +struct blk_mq_hw_ctx; + struct blk_mq_debugfs_attr { const char *name; umode_t mode; diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 1850a4225e12..68ac23d0b640 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -498,7 +498,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn, void *priv) { /* - * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx + * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table * while the queue is frozen. So we can use q_usage_counter to avoid * racing with it. */ diff --git a/block/blk-mq.c b/block/blk-mq.c index bffdd71c670d..e8d6d7a05a43 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -71,7 +71,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq) static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, blk_qc_t qc) { - return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT]; + return xa_load(&q->hctx_table, + (qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT); } static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx, @@ -573,7 +574,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, * If not tell the caller that it should skip this queue. 
*/ ret = -EXDEV; - data.hctx = q->queue_hw_ctx[hctx_idx]; + data.hctx = xa_load(&q->hctx_table, hctx_idx); if (!blk_mq_hw_queue_mapped(data.hctx)) goto out_queue_exit; cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); @@ -3437,6 +3438,8 @@ static void blk_mq_exit_hctx(struct request_queue *q, blk_mq_remove_cpuhp(hctx); + xa_erase(&q->hctx_table, hctx_idx); + spin_lock(&q->unused_hctx_lock); list_add(&hctx->hctx_list, &q->unused_hctx_list); spin_unlock(&q->unused_hctx_lock); @@ -3476,8 +3479,15 @@ static int blk_mq_init_hctx(struct request_queue *q, if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, hctx->numa_node)) goto exit_hctx; + + if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) + goto exit_flush_rq; + return 0; + exit_flush_rq: + if (set->ops->exit_request) + set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); @@ -3856,7 +3866,7 @@ void blk_mq_release(struct request_queue *q) kobject_put(&hctx->kobj); } - kfree(q->queue_hw_ctx); + xa_destroy(&q->hctx_table); /* * release .mq_kobj and sw queue's kobject now because @@ -3945,46 +3955,28 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct request_queue *q) { - int i, j, end; - struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; - - if (q->nr_hw_queues < set->nr_hw_queues) { - struct blk_mq_hw_ctx **new_hctxs; - - new_hctxs = kcalloc_node(set->nr_hw_queues, - sizeof(*new_hctxs), GFP_KERNEL, - set->numa_node); - if (!new_hctxs) - return; - if (hctxs) - memcpy(new_hctxs, hctxs, q->nr_hw_queues * - sizeof(*hctxs)); - q->queue_hw_ctx = new_hctxs; - kfree(hctxs); - hctxs = new_hctxs; - } + struct blk_mq_hw_ctx *hctx; + unsigned long i, j; /* protect against switching io scheduler */ mutex_lock(&q->sysfs_lock); for (i = 0; i < set->nr_hw_queues; i++) { int old_node; int node = blk_mq_get_hctx_node(set, i); - struct blk_mq_hw_ctx *old_hctx = hctxs[i]; + struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i); if (old_hctx) { old_node = old_hctx->numa_node; blk_mq_exit_hctx(q, set, old_hctx, i); } - hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, node); - if (!hctxs[i]) { + if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) { if (!old_hctx) break; pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n", node, old_node); - hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, - old_node); - WARN_ON_ONCE(!hctxs[i]); + hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node); + WARN_ON_ONCE(!hctx); } } /* @@ -3993,21 +3985,13 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, */ if (i != set->nr_hw_queues) { j = q->nr_hw_queues; - end = i; } else { j = i; - end = q->nr_hw_queues; q->nr_hw_queues = set->nr_hw_queues; } - for (; j < end; j++) { - struct blk_mq_hw_ctx *hctx = hctxs[j]; - - if (hctx) { - blk_mq_exit_hctx(q, set, hctx, j); - hctxs[j] = NULL; - } - } + xa_for_each_start(&q->hctx_table, j, hctx, j) + blk_mq_exit_hctx(q, set, hctx, j); mutex_unlock(&q->sysfs_lock); } @@ -4046,6 +4030,8 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, INIT_LIST_HEAD(&q->unused_hctx_list); spin_lock_init(&q->unused_hctx_lock); + xa_init(&q->hctx_table); + blk_mq_realloc_hw_ctxs(set, q); if (!q->nr_hw_queues) goto err_hctxs; @@ -4075,7 +4061,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, return 0; err_hctxs: - kfree(q->queue_hw_ctx); + xa_destroy(&q->hctx_table); q->nr_hw_queues = 0; blk_mq_sysfs_deinit(q); 
err_poll: diff --git a/block/blk-mq.h b/block/blk-mq.h index 948791ea2a3e..2615bd58bad3 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -83,7 +83,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue * enum hctx_type type, unsigned int cpu) { - return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; + return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]); } static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 3a41d50b85d3..7aa5c54901a9 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -917,8 +917,7 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq) } #define queue_for_each_hw_ctx(q, hctx, i) \ - for ((i) = 0; (i) < (q)->nr_hw_queues && \ - ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) + xa_for_each(&(q)->hctx_table, (i), (hctx)) #define hctx_for_each_ctx(hctx, ctx, i) \ for ((i) = 0; (i) < (hctx)->nr_ctx && \ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e19947d84f12..d982171469bc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -355,7 +355,7 @@ struct request_queue { unsigned int queue_depth; /* hw dispatch queues */ - struct blk_mq_hw_ctx **queue_hw_ctx; + struct xarray hctx_table; unsigned int nr_hw_queues; /* -- cgit v1.2.3-71-gd317 From 1330b6ef3313fcec577d2b020c290dc8b9f11f1a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 7 Mar 2022 16:44:21 -0800 Subject: skb: make drop reason booleanable We have a number of cases where a function returns a drop/no-drop decision as a boolean. Now that we want to report the reason code as well, we have to pass extra output arguments. We can make the reason code evaluate correctly as bool. I believe we're good to reorder the reasons as they are reported to user space as strings. Signed-off-by: Jakub Kicinski Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/skbuff.h | 1 + include/net/tcp.h | 21 +++++++++++---------- net/ipv4/tcp.c | 21 +++++++++------------ net/ipv4/tcp_ipv4.c | 12 +++++++----- net/ipv6/tcp_ipv6.c | 11 +++++++---- 5 files changed, 35 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 34f572271c0c..26538ceb4b01 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -314,6 +314,7 @@ struct sk_buff; * used to translate the reason to string.
*/ enum skb_drop_reason { + SKB_NOT_DROPPED_YET = 0, SKB_DROP_REASON_NOT_SPECIFIED, /* drop reason is not specified */ SKB_DROP_REASON_NO_SOCKET, /* socket not found */ SKB_DROP_REASON_PKT_TOO_SMALL, /* packet size is too small */ diff --git a/include/net/tcp.h b/include/net/tcp.h index d486d7b6112d..ee8237b58e1d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1674,10 +1674,11 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index, return NULL; return __tcp_md5_do_lookup(sk, l3index, addr, family); } -bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, - enum skb_drop_reason *reason, - const void *saddr, const void *daddr, - int family, int dif, int sdif); + +enum skb_drop_reason +tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, + const void *saddr, const void *daddr, + int family, int dif, int sdif); #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) @@ -1688,13 +1689,13 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index, { return NULL; } -static inline bool tcp_inbound_md5_hash(const struct sock *sk, - const struct sk_buff *skb, - enum skb_drop_reason *reason, - const void *saddr, const void *daddr, - int family, int dif, int sdif) + +static inline enum skb_drop_reason +tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, + const void *saddr, const void *daddr, + int family, int dif, int sdif) { - return false; + return SKB_NOT_DROPPED_YET; } #define tcp_twsk_md5_key(twsk) NULL #endif diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 33f20134e3f1..b5f032958b2c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4434,10 +4434,10 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *ke EXPORT_SYMBOL(tcp_md5_hash_key); /* Called with rcu_read_lock() */ -bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, - enum skb_drop_reason *reason, - const void *saddr, const void *daddr, - int family, int dif, int sdif) +enum skb_drop_reason +tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, + const void *saddr, const void *daddr, + int family, int dif, int sdif) { /* * This gets called for each TCP segment that arrives @@ -4464,18 +4464,16 @@ bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, /* We've parsed the options - do we have a hash?
*/ if (!hash_expected && !hash_location) - return false; + return SKB_NOT_DROPPED_YET; if (hash_expected && !hash_location) { - *reason = SKB_DROP_REASON_TCP_MD5NOTFOUND; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); - return true; + return SKB_DROP_REASON_TCP_MD5NOTFOUND; } if (!hash_expected && hash_location) { - *reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); - return true; + return SKB_DROP_REASON_TCP_MD5UNEXPECTED; } /* check the signature */ @@ -4483,7 +4481,6 @@ bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { - *reason = SKB_DROP_REASON_TCP_MD5FAILURE; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); if (family == AF_INET) { net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n", @@ -4497,9 +4494,9 @@ bool tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, saddr, ntohs(th->source), daddr, ntohs(th->dest), l3index); } - return true; + return SKB_DROP_REASON_TCP_MD5FAILURE; } - return false; + return SKB_NOT_DROPPED_YET; } EXPORT_SYMBOL(tcp_inbound_md5_hash); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 411357ad9757..81694a354110 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1965,9 +1965,10 @@ process: struct sock *nsk; sk = req->rsk_listener; - if (unlikely(tcp_inbound_md5_hash(sk, skb, &drop_reason, - &iph->saddr, &iph->daddr, - AF_INET, dif, sdif))) { + drop_reason = tcp_inbound_md5_hash(sk, skb, + &iph->saddr, &iph->daddr, + AF_INET, dif, sdif); + if (unlikely(drop_reason)) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; @@ -2041,8 +2042,9 @@ process: goto discard_and_relse; } - if (tcp_inbound_md5_hash(sk, skb, &drop_reason, &iph->saddr, - &iph->daddr, AF_INET, dif, sdif)) + drop_reason = tcp_inbound_md5_hash(sk, skb, &iph->saddr, + &iph->daddr, AF_INET, dif, sdif); + if (drop_reason) goto discard_and_relse; nf_reset_ct(skb); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index cb2bb7d2e907..13678d3908fa 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1632,8 +1632,10 @@ process: struct sock *nsk; sk = req->rsk_listener; - if (tcp_inbound_md5_hash(sk, skb, &drop_reason, &hdr->saddr, - &hdr->daddr, AF_INET6, dif, sdif)) { + drop_reason = tcp_inbound_md5_hash(sk, skb, + &hdr->saddr, &hdr->daddr, + AF_INET6, dif, sdif); + if (drop_reason) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; @@ -1704,8 +1706,9 @@ process: goto discard_and_relse; } - if (tcp_inbound_md5_hash(sk, skb, &drop_reason, &hdr->saddr, - &hdr->daddr, AF_INET6, dif, sdif)) + drop_reason = tcp_inbound_md5_hash(sk, skb, &hdr->saddr, &hdr->daddr, + AF_INET6, dif, sdif); + if (drop_reason) goto discard_and_relse; if (tcp_filter(sk, skb)) { -- cgit v1.2.3-71-gd317 From d8fd5a1e78db375f2246d43df7833fec07a221cd Mon Sep 17 00:00:00 2001 From: Joey Gouly Date: Tue, 1 Mar 2022 15:45:18 +0000 Subject: kasan: fix a missing header include of static_keys.h The kasan-enabled.h header relies on static keys, so make sure to include the header to avoid compilation errors (with JUMP_LABEL=n). 
It fixes the following: ./include/linux/kasan-enabled.h:9:1: warning: data definition has no type or storage class 9 | DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); | ^~~~~~~~~~~~~~~~~~~~~~~~ error: type defaults to 'int' in declaration of 'DECLARE_STATIC_KEY_FALSE' [-Werror=implicit-int] Fixes: f9b5e46f4097eb29 ("kasan: split kasan_*enabled() functions into a separate header") Cc: Peter Collingbourne Cc: Mark Rutland Cc: Catalin Marinas Cc: Will Deacon Acked-by: Andrey Konovalov Signed-off-by: Joey Gouly Link: https://lore.kernel.org/r/20220301154518.19456-1-joey.gouly@arm.com Signed-off-by: Will Deacon --- include/linux/kasan-enabled.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h index 4b6615375022..6f612d69ea0c 100644 --- a/include/linux/kasan-enabled.h +++ b/include/linux/kasan-enabled.h @@ -2,6 +2,8 @@ #ifndef _LINUX_KASAN_ENABLED_H #define _LINUX_KASAN_ENABLED_H +#include <linux/static_key.h> + #ifdef CONFIG_KASAN_HW_TAGS DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); -- cgit v1.2.3-71-gd317 From f804360bb3a50decbed6e2761247964dca72c080 Mon Sep 17 00:00:00 2001 From: Konrad Dybcio Date: Sat, 26 Feb 2022 22:41:25 +0100 Subject: clk: qcom: smd: Add missing RPM clocks for msm8992/4 XO and MSS_CFG were omitted when first adding the clocks for these SoCs. Add them, and while at it, move the XO clock to the top of the definition list, as ideally everyone should start using it sooner or later. Fixes: b4297844995f ("clk: qcom: smd: Add support for MSM8992/4 rpm clocks") Signed-off-by: Konrad Dybcio Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220226214126.21209-2-konrad.dybcio@somainline.org --- drivers/clk/qcom/clk-smd-rpm.c | 13 +++++++++++-- include/linux/soc/qcom/smd-rpm.h | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index ea28e45ca371..418f017e933f 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -413,6 +413,7 @@ static const struct clk_ops clk_smd_rpm_branch_ops = { .recalc_rate = clk_smd_rpm_recalc_rate, }; +DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0, 19200000); DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); @@ -604,7 +605,11 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, ln_bb_clk, ln_bb_a_clk, 8, 19200000); DEFINE_CLK_SMD_RPM(msm8992, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); DEFINE_CLK_SMD_RPM(msm8992, ce2_clk, ce2_a_clk, QCOM_SMD_RPM_CE_CLK, 1); +DEFINE_CLK_SMD_RPM_BRANCH(msm8992, mss_cfg_ahb_clk, mss_cfg_ahb_a_clk, + QCOM_SMD_RPM_MCFG_CLK, 0, 19200000); static struct clk_smd_rpm *msm8992_clks[] = { + [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo, + [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a, [RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk, [RPM_SMD_PNOC_A_CLK] = &msm8916_pcnoc_a_clk, [RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk, @@ -637,6 +642,8 @@ static struct clk_smd_rpm *msm8992_clks[] = { [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk, [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk, [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk, + [RPM_SMD_MSS_CFG_AHB_CLK] = &msm8992_mss_cfg_ahb_clk, + [RPM_SMD_MSS_CFG_AHB_A_CLK] = &msm8992_mss_cfg_ahb_a_clk, [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk, [RPM_SMD_QDSS_A_CLK] =
&msm8916_qdss_a_clk, [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1, @@ -661,6 +668,8 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8992 = { DEFINE_CLK_SMD_RPM(msm8994, ce3_clk, ce3_a_clk, QCOM_SMD_RPM_CE_CLK, 2); static struct clk_smd_rpm *msm8994_clks[] = { + [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo, + [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a, [RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk, [RPM_SMD_PNOC_A_CLK] = &msm8916_pcnoc_a_clk, [RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk, @@ -693,6 +702,8 @@ static struct clk_smd_rpm *msm8994_clks[] = { [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk, [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk, [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk, + [RPM_SMD_MSS_CFG_AHB_CLK] = &msm8992_mss_cfg_ahb_clk, + [RPM_SMD_MSS_CFG_AHB_A_CLK] = &msm8992_mss_cfg_ahb_a_clk, [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk, [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk, [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1, @@ -857,8 +868,6 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = { .num_clks = ARRAY_SIZE(msm8998_clks), }; -DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0, - 19200000); DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk3, ln_bb_clk3_a, 3, 19200000); DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk3_pin, ln_bb_clk3_pin_a, 3, 19200000); diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h index 860dd8cdf9f3..82c9d489833a 100644 --- a/include/linux/soc/qcom/smd-rpm.h +++ b/include/linux/soc/qcom/smd-rpm.h @@ -40,6 +40,7 @@ struct qcom_smd_rpm; #define QCOM_SMD_RPM_AGGR_CLK 0x72676761 #define QCOM_SMD_RPM_HWKM_CLK 0x6d6b7768 #define QCOM_SMD_RPM_PKA_CLK 0x616b70 +#define QCOM_SMD_RPM_MCFG_CLK 0x6766636d int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, int state, -- cgit v1.2.3-71-gd317 From 69fe0f29892077f14b56e2a479b6bcf533209d53 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 4 Mar 2022 21:08:03 -0500 Subject: block: add ->poll_bio to block_device_operations Prepare for supporting IO polling for bio-based drivers. Add a ->poll_bio callback so that bio-based drivers can provide their own logic for polling bios. Also fix ->submit_bio_bio typo in comment block above __submit_bio_noacct. Reviewed-by: Christoph Hellwig Reviewed-by: Jens Axboe Signed-off-by: Ming Lei Signed-off-by: Mike Snitzer --- block/blk-core.c | 14 +++++++++----- block/genhd.c | 4 ++++ include/linux/blkdev.h | 2 ++ 3 files changed, 15 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index 94bf37f8e61d..ce08f0aa9dfc 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -708,7 +708,7 @@ static void __submit_bio(struct bio *bio) * * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio. * bio_list_on_stack[1] contains bios that were submitted before the current - * ->submit_bio_bio, but that haven't been processed yet. + * ->submit_bio, but that haven't been processed yet.
*/ static void __submit_bio_noacct(struct bio *bio) { @@ -975,7 +975,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); blk_qc_t cookie = READ_ONCE(bio->bi_cookie); - int ret; + int ret = 0; if (cookie == BLK_QC_T_NONE || !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) @@ -985,10 +985,14 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)) return 0; - if (WARN_ON_ONCE(!queue_is_mq(q))) - ret = 0; /* not yet implemented, should not happen */ - else + if (queue_is_mq(q)) { ret = blk_mq_poll(q, cookie, iob, flags); + } else { + struct gendisk *disk = q->disk; + + if (disk && disk->fops->poll_bio) + ret = disk->fops->poll_bio(bio, iob, flags); + } blk_queue_exit(q); return ret; } diff --git a/block/genhd.c b/block/genhd.c index e351fac41bf2..1ed46a6f94f5 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -410,6 +410,10 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, struct device *ddev = disk_to_dev(disk); int ret; + /* Only makes sense for bio-based to set ->poll_bio */ + if (queue_is_mq(disk->queue) && disk->fops->poll_bio) + return -EINVAL; + /* * The disk queue should now be all set with enough information about * the device for the elevator code to pick an adequate default diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f757f9c2871f..51f1b1ddbed2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1455,6 +1455,8 @@ enum blk_unique_id { struct block_device_operations { void (*submit_bio)(struct bio *bio); + int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob, + unsigned int flags); int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); -- cgit v1.2.3-71-gd317 From ac77998b7ac3044f0509b097da9637184598980d Mon Sep 17 00:00:00 2001 From: Mohammad Kabat Date: Thu, 25 Mar 2021 14:38:55 +0200 Subject: net/mlx5: Fix size field in bufferx_reg struct According to the HW spec, the "size" field should be 16 bits wide in the bufferx register. Fixes: e281682bf294 ("net/mlx5_core: HW data structs/types definitions cleanup") Signed-off-by: Mohammad Kabat Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 598ac3bcc901..5743f5b3414b 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9900,8 +9900,8 @@ struct mlx5_ifc_bufferx_reg_bits { u8 reserved_at_0[0x6]; u8 lossy[0x1]; u8 epsb[0x1]; - u8 reserved_at_8[0xc]; - u8 size[0xc]; + u8 reserved_at_8[0x8]; + u8 size[0x10]; u8 xoff_threshold[0x10]; u8 xon_threshold[0x10]; -- cgit v1.2.3-71-gd317 From 99a2b9be077ae3a5d97fbf5f7782e0f2e9812978 Mon Sep 17 00:00:00 2001 From: Ben Ben-Ishay Date: Wed, 2 Mar 2022 17:07:08 +0200 Subject: net/mlx5e: SHAMPO, reduce TIR indication SHAMPO is an RQ/WQ feature. An indication was added to the TIR in the first place to enforce suitability between the connected TIR and RQ, but this enforcement does not exist in the current firmware implementation and was therefore redundant.
Fixes: 83439f3c37aa ("net/mlx5e: Add HW-GRO offload") Signed-off-by: Ben Ben-Ishay Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/tir.c | 3 --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +-- include/linux/mlx5/mlx5_ifc.h | 1 - 3 files changed, 1 insertion(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c index da169b816665..d4239e3b3c88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder, (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8); MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout); break; - case MLX5E_PACKET_MERGE_SHAMPO: - MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO); - break; default: break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index bf80fb612449..3667f5ef5990 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3616,8 +3616,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable) goto out; } - err = mlx5e_safe_switch_params(priv, &new_params, - mlx5e_modify_tirs_packet_merge_ctx, NULL, reset); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); out: mutex_unlock(&priv->state_lock); return err; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 5743f5b3414b..49a48d7709ac 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -3434,7 +3434,6 @@ enum { enum { MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0), MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1), - MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO = BIT(2), }; enum { -- cgit v1.2.3-71-gd317 From 34f46ae0d4b38e83cfb26fb6f06b5b5efea47fdc Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Thu, 27 Jan 2022 15:22:21 +0200 Subject: net/mlx5: Add command failures data to debugfs Add new counters to command interface debugfs to count command failures. The following counters added: total_failed - number of times command failed (any kind of failure). failed_mbox_status - number of times command failed on bad status returned by FW. In addition, add data about last command failure to command interface debugfs: last_failed_errno - last command failed returned errno. last_failed_mbox_status - last bad status returned by FW. 
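The accounting reduces to a small per-opcode helper; a condensed sketch of the pattern (mirroring the cmd_status_log() hunk in the diff below, with explanatory comments added -- illustrative, not a substitute for the actual patch):

static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode,
			   u8 status, int err)
{
	struct mlx5_cmd_stats *stats;

	if (!err)	/* only failed commands are counted */
		return;

	stats = &dev->cmd.stats[opcode];
	spin_lock_irq(&stats->lock);
	stats->failed++;			/* any kind of failure */
	if (err < 0)
		stats->last_failed_errno = -err;	/* stored as a positive errno */
	if (err == -EREMOTEIO) {		/* FW returned a bad mailbox status */
		stats->failed_mbox_status++;
		stats->last_failed_mbox_status = status;
	}
	spin_unlock_irq(&stats->lock);
}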
Signed-off-by: Moshe Shemesh Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 44 ++++++++++++++++++----- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 7 ++++ include/linux/mlx5/driver.h | 9 +++++ 3 files changed, 51 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 823d5808d5a0..8933c00067e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1877,16 +1877,38 @@ out_in: return err; } +static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int err) +{ + struct mlx5_cmd_stats *stats; + + if (!err) + return; + + stats = &dev->cmd.stats[opcode]; + spin_lock_irq(&stats->lock); + stats->failed++; + if (err < 0) + stats->last_failed_errno = -err; + if (err == -EREMOTEIO) { + stats->failed_mbox_status++; + stats->last_failed_mbox_status = status; + } + spin_unlock_irq(&stats->lock); +} + /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ -static int cmd_status_err(int err, void *out) +static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out) { - if (err) /* -EREMOTEIO is preserved */ - return err == -EREMOTEIO ? -EIO : err; + u8 status = MLX5_GET(mbox_out, out, status); - if (MLX5_GET(mbox_out, out, status) != MLX5_CMD_STAT_OK) - return -EREMOTEIO; + if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */ + err = -EIO; - return 0; + if (!err && status != MLX5_CMD_STAT_OK) + err = -EREMOTEIO; + + cmd_status_log(dev, opcode, status, err); + return err; } /** @@ -1910,8 +1932,10 @@ static int cmd_status_err(int err, void *out) int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); + u16 opcode = MLX5_GET(mbox_in, in, opcode); - return cmd_status_err(err, out); + err = cmd_status_err(dev, err, opcode, out); + return err; } EXPORT_SYMBOL(mlx5_cmd_do); @@ -1954,8 +1978,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); + u16 opcode = MLX5_GET(mbox_in, in, opcode); - err = cmd_status_err(err, out); + err = cmd_status_err(dev, err, opcode, out); return mlx5_cmd_check(dev, err, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec_polling); @@ -1991,7 +2016,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work) struct mlx5_async_ctx *ctx; ctx = work->ctx; - status = cmd_status_err(status, work->out); + status = cmd_status_err(ctx->dev, status, work->opcode, work->out); work->user_callback(status, work); if (atomic_dec_and_test(&ctx->num_inflight)) wake_up(&ctx->wait); @@ -2005,6 +2030,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, work->ctx = ctx; work->user_callback = callback; + work->opcode = MLX5_GET(mbox_in, in, opcode); work->out = out; if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) return -EIO; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 10d195042ab5..18b04e977bb8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -180,6 +180,13 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) debugfs_create_file("average", 0400, stats->root, stats, &stats_fops); 
debugfs_create_u64("n", 0400, stats->root, &stats->n); + debugfs_create_u64("failed", 0400, stats->root, &stats->failed); + debugfs_create_u64("failed_mbox_status", 0400, stats->root, + &stats->failed_mbox_status); + debugfs_create_u32("last_failed_errno", 0400, stats->root, + &stats->last_failed_errno); + debugfs_create_u8("last_failed_mbox_status", 0400, stats->root, + &stats->last_failed_mbox_status); } } } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index d3b1a6a1f8d2..f18c1e15a12c 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -264,6 +264,14 @@ enum { struct mlx5_cmd_stats { u64 sum; u64 n; + /* number of times command failed */ + u64 failed; + /* number of times command failed on bad status returned by FW */ + u64 failed_mbox_status; + /* last command failed returned errno */ + u32 last_failed_errno; + /* last bad status returned by FW */ + u8 last_failed_mbox_status; struct dentry *root; /* protect command average calculations */ spinlock_t lock; @@ -955,6 +963,7 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); struct mlx5_async_work { struct mlx5_async_ctx *ctx; mlx5_async_cbk_t user_callback; + u16 opcode; /* cmd opcode */ void *out; /* pointer to the cmd output buffer */ }; -- cgit v1.2.3-71-gd317 From d2cb8dda214fb75f946ba554a1ecd25da7004b2b Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 26 Jan 2022 07:23:53 +0200 Subject: net/mlx5: Change release_all_pages cap bit location mlx5 FW has changed release_all_pages cap bit by one bit offset to reflect a fix in the FW flow for release_all_pages. The driver should use the new bit to ensure it calls release_all_pages only if the FW fix is there. Signed-off-by: Moshe Shemesh Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index ea65131835ab..69985e4d8dfe 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1419,8 +1419,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_130[0xa]; u8 log_max_ra_res_dc[0x6]; - u8 reserved_at_140[0x6]; + u8 reserved_at_140[0x5]; u8 release_all_pages[0x1]; + u8 must_not_use[0x1]; u8 reserved_at_147[0x2]; u8 roce_accl[0x1]; u8 log_max_ra_req_qp[0x6]; -- cgit v1.2.3-71-gd317 From 66771a1c729e816dc84e29ba555013b6e722fb22 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Fri, 18 Feb 2022 09:36:20 +0200 Subject: net/mlx5: Move debugfs entries to separate struct Move the debugfs entry pointers under priv to their own struct. Add get function for device debugfs root. 
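In short, the scattered dentry pointers are grouped into one struct and reached through a single accessor; a minimal sketch of the resulting layout (field set and directory names taken from the diff below):

struct mlx5_debugfs_entries {
	struct dentry *dbg_root;	/* per-device debugfs directory */
	struct dentry *qp_debugfs;	/* "QPs" */
	struct dentry *eq_debugfs;	/* "EQs" */
	struct dentry *cq_debugfs;	/* "CQs" */
	struct dentry *cmdif_debugfs;	/* "commands" */
};

/* Out-of-module users (e.g. mlx5_ib) no longer reach into dev->priv directly: */
struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
{
	return dev->priv.dbg.dbg_root;
}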
Signed-off-by: Moshe Shemesh Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/cong.c | 3 +-- drivers/infiniband/hw/mlx5/main.c | 2 +- drivers/infiniband/hw/mlx5/mr.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 30 +++++++++++++--------- drivers/net/ethernet/mellanox/mlx5/core/main.c | 8 +++--- .../ethernet/mellanox/mlx5/core/steering/dr_dbg.c | 2 +- include/linux/mlx5/driver.h | 17 +++++++----- 8 files changed, 37 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c index 0b61df52332a..290ea8ac3838 100644 --- a/drivers/infiniband/hw/mlx5/cong.c +++ b/drivers/infiniband/hw/mlx5/cong.c @@ -433,8 +433,7 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num) dev->port[port_num].dbg_cc_params = dbg_cc_params; - dbg_cc_params->root = debugfs_create_dir("cc_params", - mdev->priv.dbg_root); + dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev)); for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) { dbg_cc_params->params[i].offset = i; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 85f526c861e9..32a0ea820573 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4178,7 +4178,7 @@ static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) if (!mlx5_debugfs_root) return 0; - root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root); + root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev)); dev->delay_drop.dir_debugfs = root; debugfs_create_atomic_t("num_timeout_events", 0400, root, diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 06e4b8cea6bd..32cb7068f0ca 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -696,7 +696,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) if (!mlx5_debugfs_root || dev->is_rep) return; - cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); + cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev)); for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { ent = &cache->ent[i]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 8933c00067e8..3b61224e9a3a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1543,7 +1543,7 @@ static void create_debugfs_files(struct mlx5_core_dev *dev) { struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); + dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev)); debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops); debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 18b04e977bb8..883e44457367 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -99,26 +99,32 @@ void mlx5_unregister_debugfs(void) debugfs_remove(mlx5_debugfs_root); } +struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev) +{ + return dev->priv.dbg.dbg_root; +} +EXPORT_SYMBOL(mlx5_debugfs_get_dev_root); + void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) { - dev->priv.qp_debugfs = 
debugfs_create_dir("QPs", dev->priv.dbg_root); + dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root); } EXPORT_SYMBOL(mlx5_qp_debugfs_init); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.qp_debugfs); + debugfs_remove_recursive(dev->priv.dbg.qp_debugfs); } EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup); void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) { - dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root); + dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root); } void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.eq_debugfs); + debugfs_remove_recursive(dev->priv.dbg.eq_debugfs); } static ssize_t average_read(struct file *filp, char __user *buf, size_t count, @@ -168,8 +174,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) const char *namep; int i; - cmd = &dev->priv.cmdif_debugfs; - *cmd = debugfs_create_dir("commands", dev->priv.dbg_root); + cmd = &dev->priv.dbg.cmdif_debugfs; + *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root); for (i = 0; i < MLX5_CMD_OP_MAX; i++) { stats = &dev->cmd.stats[i]; @@ -193,17 +199,17 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.cmdif_debugfs); + debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs); } void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) { - dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root); + dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root); } void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.cq_debugfs); + debugfs_remove_recursive(dev->priv.dbg.cq_debugfs); } static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, @@ -448,7 +454,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) if (!mlx5_debugfs_root) return 0; - err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs, + err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs, &qp->dbg, qp->qpn, qp_fields, ARRAY_SIZE(qp_fields), qp); if (err) @@ -475,7 +481,7 @@ int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (!mlx5_debugfs_root) return 0; - err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs, + err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs, &eq->dbg, eq->eqn, eq_fields, ARRAY_SIZE(eq_fields), eq); if (err) @@ -500,7 +506,7 @@ int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) if (!mlx5_debugfs_root) return 0; - err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs, + err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs, &cq->dbg, cq->cqn, cq_fields, ARRAY_SIZE(cq_fields), cq); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 98be7050aa8d..d8d36477b97f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1487,8 +1487,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) INIT_LIST_HEAD(&priv->pgdir_list); priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); - priv->dbg_root = debugfs_create_dir(dev_name(dev->device), - mlx5_debugfs_root); + priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device), + mlx5_debugfs_root); INIT_LIST_HEAD(&priv->traps); err = mlx5_tout_init(dev); @@ -1524,7 +1524,7 @@ 
err_pagealloc_init: err_health_init: mlx5_tout_cleanup(dev); err_timeout_init: - debugfs_remove(dev->priv.dbg_root); + debugfs_remove(dev->priv.dbg.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); mutex_destroy(&priv->bfregs.wc_head.lock); @@ -1542,7 +1542,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); mlx5_tout_cleanup(dev); - debugfs_remove_recursive(dev->priv.dbg_root); + debugfs_remove_recursive(dev->priv.dbg.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); mutex_destroy(&priv->bfregs.wc_head.lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c index 2784cd59fefe..2e8b109fb34e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c @@ -630,7 +630,7 @@ void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn) } dmn->dump_info.steering_debugfs = - debugfs_create_dir("steering", dev->priv.dbg_root); + debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev)); dmn->dump_info.fdb_debugfs = debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f18c1e15a12c..aa539d245a12 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -551,6 +551,14 @@ struct mlx5_adev { int idx; }; +struct mlx5_debugfs_entries { + struct dentry *dbg_root; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; +}; + struct mlx5_ft_pool; struct mlx5_priv { /* IRQ table valid only for real pci devices PF or VF */ @@ -570,12 +578,7 @@ struct mlx5_priv { struct mlx5_core_health health; struct list_head traps; - /* start: qp staff */ - struct dentry *qp_debugfs; - struct dentry *eq_debugfs; - struct dentry *cq_debugfs; - struct dentry *cmdif_debugfs; - /* end: qp staff */ + struct mlx5_debugfs_entries dbg; /* start: alloc staff */ /* protect buffer allocation according to numa node */ @@ -585,7 +588,6 @@ struct mlx5_priv { struct mutex pgdir_mutex; struct list_head pgdir_list; /* end: alloc staff */ - struct dentry *dbg_root; struct list_head ctx_list; spinlock_t ctx_lock; @@ -1038,6 +1040,7 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); +struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, -- cgit v1.2.3-71-gd317 From 4e05cbf05c66ca6931ee4f2a5aff0d84f1e65f40 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Thu, 27 Jan 2022 07:03:33 +0200 Subject: net/mlx5: Add pages debugfs Add pages debugfs to expose the following counters for debuggability: fw_pages_total - How many pages were given to FW and not returned yet. vfs_pages - For SRIOV, how many pages were given to FW for virtual functions usage. host_pf_pages - For ECPF, how many pages were given to FW for external hosts physical functions usage. 
Signed-off-by: Moshe Shemesh Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 17 +++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 2 ++ include/linux/mlx5/driver.h | 9 ++++++--- 3 files changed, 25 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 883e44457367..8673ba2df910 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -212,6 +212,23 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) debugfs_remove_recursive(dev->priv.dbg.cq_debugfs); } +void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev) +{ + struct dentry *pages; + + dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root); + pages = dev->priv.dbg.pages_debugfs; + + debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages); + debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages); + debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages); +} + +void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + debugfs_remove_recursive(dev->priv.dbg.pages_debugfs); +} + static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, int index, int *is_str) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index d29a305858ad..8855fe71d480 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -714,12 +714,14 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev) return -ENOMEM; xa_init(&dev->priv.page_root_xa); + mlx5_pages_debugfs_init(dev); return 0; } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) { + mlx5_pages_debugfs_cleanup(dev); xa_destroy(&dev->priv.page_root_xa); destroy_workqueue(dev->priv.pg_wq); } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index aa539d245a12..c5f93b5910ed 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -557,6 +557,7 @@ struct mlx5_debugfs_entries { struct dentry *eq_debugfs; struct dentry *cq_debugfs; struct dentry *cmdif_debugfs; + struct dentry *pages_debugfs; }; struct mlx5_ft_pool; @@ -569,11 +570,11 @@ struct mlx5_priv { struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; struct xarray page_root_xa; - int fw_pages; + u32 fw_pages; atomic_t reg_pages; struct list_head free_list; - int vfs_pages; - int host_pf_pages; + u32 vfs_pages; + u32 host_pf_pages; struct mlx5_core_health health; struct list_head traps; @@ -1026,6 +1027,8 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); +void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages, bool ec_function); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); -- cgit v1.2.3-71-gd317 From 32071187e9fb18da62f5be569bd2ea0d7a981ee8 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Thu, 27 Jan 2022 07:51:14 +0200 Subject: net/mlx5: Add debugfs counters for page commands failures Add the following new debugfs counters for debug and verbosity: 
fw_pages_alloc_failed - number of pages FW requested but the driver failed to allocate. give_pages_dropped - number of pages given to FW for which the give-pages command was failed by FW. reclaim_pages_discard - number of pages which were about to be reclaimed back but FW failed the command. Signed-off-by: Moshe Shemesh Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 4 ++++ drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 14 +++++++++++--- include/linux/mlx5/driver.h | 3 +++ 3 files changed, 18 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 8673ba2df910..d69bac93a83b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -222,6 +222,10 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev) debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages); debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages); debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages); + debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed); + debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped); + debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages, + &dev->priv.reclaim_pages_discard); } void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 8855fe71d480..e0543b860144 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -352,8 +352,10 @@ retry: if (err) { if (err == -ENOMEM) err = alloc_system_page(dev, function); - if (err) + if (err) { + dev->priv.fw_pages_alloc_failed += (npages - i); goto out_4k; + } goto retry; } @@ -372,14 +374,14 @@ retry: /* if triggered by FW and failed by FW ignore */ if (event) { err = 0; - goto out_4k; + goto out_dropped; } } if (err) { err = mlx5_cmd_check(dev, err, in, out); mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); - goto out_4k; + goto out_dropped; } dev->priv.fw_pages += npages; @@ -394,6 +396,8 @@ retry: kvfree(in); return 0; +out_dropped: + dev->priv.give_pages_dropped += npages; out_4k: for (i--; i >= 0; i--) free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function); @@ -516,6 +520,10 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n", func_id, npages, outlen); err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); + if (err) { + npages = MLX5_GET(manage_pages_in, in, input_num_entries); + dev->priv.reclaim_pages_discard += npages; + } /* if triggered by FW event and failed by FW then ignore */ if (event && err == -EREMOTEIO) err = 0; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index c5f93b5910ed..00a914b0716e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -575,6 +575,9 @@ struct mlx5_priv { struct list_head free_list; u32 vfs_pages; u32 host_pf_pages; + u32 fw_pages_alloc_failed; + u32 give_pages_dropped; + u32 reclaim_pages_discard; struct mlx5_core_health health; struct list_head traps; -- cgit v1.2.3-71-gd317 From 5c422bfad2fbab96381273e50c7084465199501d Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Thu, 24
Feb 2022 00:58:58 +0200 Subject: net/mlx5: DR, Add support for matching on Internet Header Length (IHL) Add support for matching on new field - Internet Header Length (IHL). Signed-off-by: Muhammad Sammar Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 11 +++++++++-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 4 +++- include/linux/mlx5/mlx5_ifc.h | 5 ++++- 6 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 38971fe1dfe1..1668c2b2f60f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -47,6 +47,11 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec) return spec->ttl_hoplimit; } +static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec) +{ + return spec->ipv4_ihl; +} + #define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \ (_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \ (_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \ @@ -507,7 +512,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_ttl_set(&mask.outer)) + if (dr_mask_is_ttl_set(&mask.outer) || + dr_mask_is_ipv4_ihl_set(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++], &mask, inner, rx); } @@ -614,7 +620,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_ttl_set(&mask.inner)) + if (dr_mask_is_ttl_set(&mask.inner) || + dr_mask_is_ipv4_ihl_set(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++], &mask, inner, rx); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 187e29b409b6..c7094fb10a7f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -793,6 +793,7 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bo spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr); spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr); + spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr); spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr); spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index 2d62950f7a29..80424d1e3bb7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -1152,6 +1152,7 @@ dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value, struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit); + DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 6ca06800f1d9..d248a428f872 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -1331,6 +1331,7 @@ static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit); + DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 55fcb751e24a..02590f665174 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -555,7 +555,9 @@ struct mlx5dr_match_spec { */ u32 tcp_dport:16; - u32 reserved_auto1:24; + u32 reserved_auto1:16; + u32 ipv4_ihl:4; + u32 reserved_auto2:4; u32 ttl_hoplimit:8; /* UDP source port.;tcp and udp sport/dport are mutually exclusive */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 69985e4d8dfe..1f0c35162b7b 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -493,7 +493,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; - u8 reserved_at_c0[0x18]; + u8 reserved_at_c0[0x10]; + u8 ipv4_ihl[0x4]; + u8 reserved_at_c4[0x4]; + u8 ttl_hoplimit[0x8]; u8 udp_sport[0x10]; -- cgit v1.2.3-71-gd317 From 6862c787c7e88df490675ed781dc9052dba88a56 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Wed, 23 Feb 2022 18:40:59 +0200 Subject: net/mlx5: DR, Add support for ConnectX-7 steering Add support for a new SW format version that is implemented by ConnectX-7. Except for several differences, the STEv2 is identical to STEv1, so for most callbacks the STEv2 context struct will call STEv1 functions. 
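The reuse pattern: the STEv1 callbacks are made non-static (the bulk of the diff below) and the STEv2 context table points at them, overriding only the ConnectX-7 specific parts. An abridged sketch (field names illustrative; the full ops table lives in the new dr_ste_v2.c):

/* dr_ste_v2.c -- abridged, not the complete context */
static struct mlx5dr_ste_ctx ste_ctx_v2 = {
	/* shared verbatim with STEv1 */
	.set_miss_addr		= &dr_ste_v1_set_miss_addr,
	.get_miss_addr		= &dr_ste_v1_get_miss_addr,
	.set_byte_mask		= &dr_ste_v1_set_byte_mask,
	.get_byte_mask		= &dr_ste_v1_get_byte_mask,
	.set_actions_tx		= &dr_ste_v1_set_actions_tx,
	.set_actions_rx		= &dr_ste_v1_set_actions_rx,
	/* ... remaining callbacks reuse dr_ste_v1_* the same way; only
	 * v2-specific details (e.g. the modify-header action field
	 * layout) differ ... */
};

struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
{
	return &ste_ctx_v2;
}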
Signed-off-by: Yevgeny Kliteynik Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 1 + .../mellanox/mlx5/core/steering/dr_domain.c | 2 +- .../mellanox/mlx5/core/steering/dr_matcher.c | 8 +- .../ethernet/mellanox/mlx5/core/steering/dr_ste.c | 2 + .../ethernet/mellanox/mlx5/core/steering/dr_ste.h | 1 + .../mellanox/mlx5/core/steering/dr_ste_v1.c | 202 +++++++++--------- .../mellanox/mlx5/core/steering/dr_ste_v1.h | 94 +++++++++ .../mellanox/mlx5/core/steering/dr_ste_v2.c | 231 +++++++++++++++++++++ .../ethernet/mellanox/mlx5/core/steering/fs_dr.c | 2 +- .../ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 2 +- include/linux/mlx5/mlx5_ifc.h | 1 + 11 files changed, 436 insertions(+), 110 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index a7170ab3af97..b94cca45eb78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -103,6 +103,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o steering/dr_icm_pool.o steering/dr_buddy.o \ steering/dr_ste.o steering/dr_send.o \ steering/dr_ste_v0.o steering/dr_ste_v1.o \ + steering/dr_ste_v2.o \ steering/dr_cmd.o steering/dr_fw.o \ steering/dr_action.o steering/fs_dr.o \ steering/dr_dbg.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 5fa7f9d6d8b9..fc6ae49b5ecc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -8,7 +8,7 @@ #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \ ((dmn)->info.caps.dmn_type##_sw_owner || \ ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \ - (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX)) + (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7)) static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 1668c2b2f60f..a4b5b415df90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -108,7 +108,7 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) static bool dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED); } @@ -149,7 +149,7 @@ static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *mi static bool dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED); } @@ -266,13 +266,13 @@ static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask, static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver 
>= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED); } static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index b25df6ae8ef3..518e949847a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -1367,6 +1367,8 @@ struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version) return mlx5dr_ste_get_ctx_v0(); else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX) return mlx5dr_ste_get_ctx_v1(); + else if (version == MLX5_STEERING_FORMAT_CONNECTX_7) + return mlx5dr_ste_get_ctx_v2(); return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h index 9d9b073df75b..17513baff9b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -201,5 +201,6 @@ struct mlx5dr_ste_ctx { struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void); struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void); +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void); #endif /* _DR_STE_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 2f6d0d62874f..fcb962c6db2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -3,7 +3,7 @@ #include #include "mlx5_ifc_dr_ste_v1.h" -#include "dr_ste.h" +#include "dr_ste_v1.h" #define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \ ((inner) ? 
DR_STE_V1_LU_TYPE_##lookup_type##_I : \ @@ -262,7 +262,7 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type) MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type); } -static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) +void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) { u64 index = miss_addr >> 6; @@ -270,7 +270,7 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index); } -static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) +u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) { u64 index = ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | @@ -279,12 +279,12 @@ static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) return index << 6; } -static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask) +void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask) { MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask); } -static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p) +u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p) { return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask); } @@ -295,13 +295,13 @@ static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type) MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF); } -static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type) +void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type) { MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8); MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF); } -static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p) +u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p) { u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format); u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx); @@ -314,7 +314,7 @@ static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi) MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi); } -static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) +void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) { u64 index = (icm_addr >> 5) | ht_size; @@ -322,8 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index); } -static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, - bool is_rx, u16 gvmi) +void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi) { dr_ste_v1_set_lu_type(hw_ste_p, lu_type); dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); @@ -333,8 +332,7 @@ static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi); } -static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, - u32 ste_size) +void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size) { u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL; u8 *mask = tag + DR_STE_SIZE_TAG; @@ -511,12 +509,12 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste, memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action)); } -static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, - u8 *action_type_set, - u32 actions_caps, - u8 *last_ste, - struct mlx5dr_ste_actions_attr *attr, - u32 *added_stes) +void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u32 actions_caps, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) { u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; @@ -635,12 +633,12 @@ static void 
dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } -static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, - u8 *action_type_set, - u32 actions_caps, - u8 *last_ste, - struct mlx5dr_ste_actions_attr *attr, - u32 *added_stes) +void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u32 actions_caps, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) { u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; @@ -808,11 +806,11 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } -static void dr_ste_v1_set_action_set(u8 *d_action, - u8 hw_field, - u8 shifter, - u8 length, - u32 data) +void dr_ste_v1_set_action_set(u8 *d_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) { shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET); @@ -822,11 +820,11 @@ static void dr_ste_v1_set_action_set(u8 *d_action, MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data); } -static void dr_ste_v1_set_action_add(u8 *d_action, - u8 hw_field, - u8 shifter, - u8 length, - u32 data) +void dr_ste_v1_set_action_add(u8 *d_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) { shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD); @@ -836,12 +834,12 @@ static void dr_ste_v1_set_action_add(u8 *d_action, MLX5_SET(ste_double_action_add_v1, d_action, add_value, data); } -static void dr_ste_v1_set_action_copy(u8 *d_action, - u8 dst_hw_field, - u8 dst_shifter, - u8 dst_len, - u8 src_hw_field, - u8 src_shifter) +void dr_ste_v1_set_action_copy(u8 *d_action, + u8 dst_hw_field, + u8 dst_shifter, + u8 dst_len, + u8 src_hw_field, + u8 src_shifter) { dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; @@ -856,11 +854,11 @@ static void dr_ste_v1_set_action_copy(u8 *d_action, #define DR_STE_DECAP_L3_ACTION_NUM 8 #define DR_STE_L2_HDR_MAX_SZ 20 -static int dr_ste_v1_set_action_decap_l3_list(void *data, - u32 data_sz, - u8 *hw_action, - u32 hw_action_sz, - u16 *used_hw_action_num) +int dr_ste_v1_set_action_decap_l3_list(void *data, + u32 data_sz, + u8 *hw_action, + u32 hw_action_sz, + u16 *used_hw_action_num) { u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {}; void *data_ptr = padded_data; @@ -985,8 +983,8 @@ static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1009,8 +1007,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask); @@ -1033,8 +1031,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param 
*mask) +void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask); @@ -1068,8 +1066,8 @@ static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *va return 0; } -static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask); @@ -1209,8 +1207,8 @@ static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value, return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); } -static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1242,8 +1240,8 @@ static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value, return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); } -static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1322,8 +1320,8 @@ static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1344,8 +1342,8 @@ static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value return 0; } -static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask); @@ -1384,8 +1382,8 @@ static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask); @@ -1408,8 +1406,8 @@ static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask); @@ -1435,8 +1433,8 @@ static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask); @@ -1480,8 +1478,8 @@ static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value, return 0; } -static void 
dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask); @@ -1515,8 +1513,8 @@ static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *valu return 0; } -static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask); @@ -1556,8 +1554,8 @@ static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *valu return 0; } -static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask); @@ -1603,8 +1601,8 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask); @@ -1625,8 +1623,8 @@ static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask); @@ -1652,8 +1650,8 @@ static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask); @@ -1682,9 +1680,8 @@ dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value, return 0; } -static void -dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask); @@ -1712,9 +1709,8 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value, return 0; } -static void -dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask); @@ -1735,8 +1731,8 @@ static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER; dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask); @@ -1758,8 +1754,8 @@ static int 
dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask); @@ -1782,8 +1778,8 @@ static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask); @@ -1846,8 +1842,8 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask); @@ -1901,8 +1897,8 @@ static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0; dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask); @@ -1910,8 +1906,8 @@ static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag; } -static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1; dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask); @@ -1935,7 +1931,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *va return 0; } -static void +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -1968,7 +1964,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_par return 0; } -static void +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -1991,8 +1987,8 @@ static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *v return 0; } -static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask); @@ -2017,7 +2013,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, return 0; } -static void +void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -2044,7 +2040,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, return 0; } -static void +void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h new file mode 100644 index 000000000000..8a1d49790c6e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef _DR_STE_V1_ +#define _DR_STE_V1_ + +#include "dr_types.h" +#include "dr_ste.h" + +void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr); +u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p); +void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask); +u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p); +void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type); +u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p); +void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size); +void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi); +void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size); +void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); +void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); +void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter, + u8 length, u32 data); +void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter, + u8 length, u32 data); +void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter, + u8 dst_len, u8 src_hw_field, u8 src_shifter); +int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action, + u32 hw_action_sz, u16 *used_hw_action_num); +void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void 
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); + +#endif /* _DR_STE_V1_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c new file mode 100644 index 000000000000..c60fddd125d2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#include "dr_ste_v1.h" + +enum { + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, + DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, + DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, + DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95, +}; + +static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = { + [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { + .hw_field 
= DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15, + }, +}; + +static struct mlx5dr_ste_ctx ste_ctx_v2 = { + /* Builders */ + .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, + .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init, + .build_eth_l3_ipv6_dst_init = 
&dr_ste_v1_build_eth_l3_ipv6_dst_init, + .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init, + .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init, + .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init, + .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init, + .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init, + .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init, + .build_mpls_init = &dr_ste_v1_build_mpls_init, + .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init, + .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init, + .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init, + .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init, + .build_icmp_init = &dr_ste_v1_build_icmp_init, + .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init, + .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init, + .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init, + .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, + .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init, + .build_tnl_geneve_tlv_opt_exist_init = + &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init, + .build_register_0_init = &dr_ste_v1_build_register_0_init, + .build_register_1_init = &dr_ste_v1_build_register_1_init, + .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, + .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init, + .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init, + .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init, + .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init, + .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init, + .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init, + + /* Getters and Setters */ + .ste_init = &dr_ste_v1_init, + .set_next_lu_type = &dr_ste_v1_set_next_lu_type, + .get_next_lu_type = &dr_ste_v1_get_next_lu_type, + .set_miss_addr = &dr_ste_v1_set_miss_addr, + .get_miss_addr = &dr_ste_v1_get_miss_addr, + .set_hit_addr = &dr_ste_v1_set_hit_addr, + .set_byte_mask = &dr_ste_v1_set_byte_mask, + .get_byte_mask = &dr_ste_v1_get_byte_mask, + + /* Actions */ + .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | + DR_STE_CTX_ACTION_CAP_RX_PUSH | + DR_STE_CTX_ACTION_CAP_RX_ENCAP, + .set_actions_rx = &dr_ste_v1_set_actions_rx, + .set_actions_tx = &dr_ste_v1_set_actions_tx, + .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr), + .modify_field_arr = dr_ste_v2_action_modify_field_arr, + .set_action_set = &dr_ste_v1_set_action_set, + .set_action_add = &dr_ste_v1_set_action_add, + .set_action_copy = &dr_ste_v1_set_action_copy, + .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, + + /* Send */ + .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, +}; + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void) +{ + return &ste_ctx_v2; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 57ffcab7294c..045b0cf90063 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -758,7 +758,7 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, enum fs_flow_table_type ft_type) { if (ft_type != FS_FT_FDB || - MLX5_CAP_GEN(ns->dev, steering_format_version) != 
MLX5_STEERING_FORMAT_CONNECTX_6DX) + MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5) return 0; return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index dfa223415fe2..03efbdf3fec3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -136,7 +136,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev) (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) || (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) && (MLX5_CAP_GEN(dev, steering_format_version) <= - MLX5_STEERING_FORMAT_CONNECTX_6DX))); + MLX5_STEERING_FORMAT_CONNECTX_7))); } /* buddy functions & structure */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 1f0c35162b7b..bcf60ede6fcc 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1346,6 +1346,7 @@ enum mlx5_fc_bulk_alloc_bitmask { enum { MLX5_STEERING_FORMAT_CONNECTX_5 = 0, MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, + MLX5_STEERING_FORMAT_CONNECTX_7 = 2, }; struct mlx5_ifc_cmd_hca_cap_bits { -- cgit v1.2.3-71-gd317 From 9a61d0838cd0a81529badfba7bfa39e81d5529d3 Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Fri, 25 Feb 2022 20:00:21 +0530 Subject: drivers/nvdimm: Add nvdimm pmu structure Add a structure called nvdimm_pmu to support performance-stats reporting for nvdimm devices. It carries the per-device pmu data, such as the pmu structure used for performance stats and the nvdimm device pointer, along with cpumask attributes. Acked-by: Peter Zijlstra (Intel) Tested-by: Nageswara R Sastry Signed-off-by: Kajol Jain Reviewed-by: Madhavan Srinivasan Link: https://lore.kernel.org/r/20220225143024.47947-2-kjain@linux.ibm.com Signed-off-by: Dan Williams --- include/linux/nd.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include/linux') diff --git a/include/linux/nd.h b/include/linux/nd.h index 8a8c63edb1b2..ad186e828263 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -8,6 +8,7 @@ #include #include #include +#include enum nvdimm_event { NVDIMM_REVALIDATE_POISON, @@ -23,6 +24,25 @@ enum nvdimm_claim_class { NVDIMM_CCLASS_UNKNOWN, }; +/** + * struct nvdimm_pmu - data structure for nvdimm perf driver + * @pmu: pmu data structure for nvdimm performance stats. + * @dev: nvdimm device pointer. + * @cpu: designated cpu for counter access. + * @node: node for cpu hotplug notifier link. + * @cpuhp_state: state for cpu hotplug notification. + * @arch_cpumask: cpumask to get designated cpu for counter access. + */ +struct nvdimm_pmu { + struct pmu pmu; + struct device *dev; + int cpu; + struct hlist_node node; + enum cpuhp_state cpuhp_state; + /* cpumask provided by arch/platform specific code */ + struct cpumask arch_cpumask; +}; + struct nd_device_driver { struct device_driver drv; unsigned long type; -- cgit v1.2.3-71-gd317 From 0fab1ba6ad6ba1f76380f92ead95c6e861ef8116 Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Fri, 25 Feb 2022 20:00:22 +0530 Subject: drivers/nvdimm: Add perf interface to expose nvdimm performance stats A common interface is added to provide performance-stats reporting support for nvdimm devices. The interface defines the supported event list, the config fields for the event attributes, and their corresponding bit values, all of which are exported via sysfs.
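As an illustration of how a backend is expected to consume this interface (this sketch is not part of the patch: the my_* callbacks, my_backend_probe() and the "nmem0" name are hypothetical placeholders), a driver fills in the mandatory perf callbacks and hands its nvdimm_pmu to the registration function added below:

#include <linux/nd.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Placeholder callbacks; a real backend reads hardware counters here */
static int my_event_init(struct perf_event *event)
{
	/* validate event->attr.config against the exported event list */
	return 0;
}
static int my_event_add(struct perf_event *event, int flags) { return 0; }
static void my_event_del(struct perf_event *event, int flags) { }
static void my_event_read(struct perf_event *event) { }

static int my_backend_probe(struct platform_device *pdev)
{
	struct nvdimm_pmu *nd_pmu;
	int rc;

	nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
	if (!nd_pmu)
		return -ENOMEM;

	/* register_nvdimm_pmu() rejects a pmu with any of these left NULL */
	nd_pmu->pmu.name = "nmem0";
	nd_pmu->pmu.event_init = my_event_init;
	nd_pmu->pmu.add = my_event_add;
	nd_pmu->pmu.del = my_event_del;
	nd_pmu->pmu.read = my_event_read;

	rc = register_nvdimm_pmu(nd_pmu, pdev);
	if (rc)
		kfree(nd_pmu);
	return rc;
}

Once registered, the events appear under /sys/bus/event_source/devices/<pmu-name>/ and can be counted with the standard tool, e.g. perf stat -e nmem0/ctl_res_cnt/.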
The interface also adds support for pmu register/unregister functions and the cpu hotplug feature, along with macros for handling event addition via sysfs. It adds attribute groups for format, cpumask and events to the pmu structure. Users can use the standard perf tool to access the perf events exposed via the nvdimm pmu. [Declare pmu functions in the nd.h file to resolve an implicit-function-declaration warning and make the hotplug function static, as reported by the kernel test robot] Acked-by: Peter Zijlstra (Intel) Tested-by: Nageswara R Sastry Signed-off-by: Kajol Jain Link: https://lore.kernel.org/all/202202241242.zqzGkguy-lkp@intel.com/ Reported-by: kernel test robot Reviewed-by: Madhavan Srinivasan Link: https://lore.kernel.org/r/20220225143024.47947-3-kjain@linux.ibm.com Signed-off-by: Dan Williams --- drivers/nvdimm/Makefile | 1 + drivers/nvdimm/nd_perf.c | 328 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/nd.h | 24 ++++ 3 files changed, 353 insertions(+) create mode 100644 drivers/nvdimm/nd_perf.c (limited to 'include/linux') diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile index 29203f3d3069..25dba6095612 100644 --- a/drivers/nvdimm/Makefile +++ b/drivers/nvdimm/Makefile @@ -18,6 +18,7 @@ nd_e820-y := e820.o libnvdimm-y := core.o libnvdimm-y += bus.o libnvdimm-y += dimm_devs.o +libnvdimm-y += nd_perf.o libnvdimm-y += dimm.o libnvdimm-y += region_devs.o libnvdimm-y += region.o diff --git a/drivers/nvdimm/nd_perf.c b/drivers/nvdimm/nd_perf.c new file mode 100644 index 000000000000..314415894acf --- /dev/null +++ b/drivers/nvdimm/nd_perf.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * nd_perf.c: NVDIMM Device Performance Monitoring Unit support + * + * Perf interface to expose nvdimm performance stats. + * + * Copyright (C) 2021 IBM Corporation + */ + +#define pr_fmt(fmt) "nvdimm_pmu: " fmt + +#include + +#define EVENT(_name, _code) enum{_name = _code} + +/* + * NVDIMM Events codes.
+ */ + +/* Controller Reset Count */ +EVENT(CTL_RES_CNT, 0x1); +/* Controller Reset Elapsed Time */ +EVENT(CTL_RES_TM, 0x2); +/* Power-on Seconds */ +EVENT(POWERON_SECS, 0x3); +/* Life Remaining */ +EVENT(MEM_LIFE, 0x4); +/* Critical Resource Utilization */ +EVENT(CRI_RES_UTIL, 0x5); +/* Host Load Count */ +EVENT(HOST_L_CNT, 0x6); +/* Host Store Count */ +EVENT(HOST_S_CNT, 0x7); +/* Host Store Duration */ +EVENT(HOST_S_DUR, 0x8); +/* Host Load Duration */ +EVENT(HOST_L_DUR, 0x9); +/* Media Read Count */ +EVENT(MED_R_CNT, 0xa); +/* Media Write Count */ +EVENT(MED_W_CNT, 0xb); +/* Media Read Duration */ +EVENT(MED_R_DUR, 0xc); +/* Media Write Duration */ +EVENT(MED_W_DUR, 0xd); +/* Cache Read Hit Count */ +EVENT(CACHE_RH_CNT, 0xe); +/* Cache Write Hit Count */ +EVENT(CACHE_WH_CNT, 0xf); +/* Fast Write Count */ +EVENT(FAST_W_CNT, 0x10); + +NVDIMM_EVENT_ATTR(ctl_res_cnt, CTL_RES_CNT); +NVDIMM_EVENT_ATTR(ctl_res_tm, CTL_RES_TM); +NVDIMM_EVENT_ATTR(poweron_secs, POWERON_SECS); +NVDIMM_EVENT_ATTR(mem_life, MEM_LIFE); +NVDIMM_EVENT_ATTR(cri_res_util, CRI_RES_UTIL); +NVDIMM_EVENT_ATTR(host_l_cnt, HOST_L_CNT); +NVDIMM_EVENT_ATTR(host_s_cnt, HOST_S_CNT); +NVDIMM_EVENT_ATTR(host_s_dur, HOST_S_DUR); +NVDIMM_EVENT_ATTR(host_l_dur, HOST_L_DUR); +NVDIMM_EVENT_ATTR(med_r_cnt, MED_R_CNT); +NVDIMM_EVENT_ATTR(med_w_cnt, MED_W_CNT); +NVDIMM_EVENT_ATTR(med_r_dur, MED_R_DUR); +NVDIMM_EVENT_ATTR(med_w_dur, MED_W_DUR); +NVDIMM_EVENT_ATTR(cache_rh_cnt, CACHE_RH_CNT); +NVDIMM_EVENT_ATTR(cache_wh_cnt, CACHE_WH_CNT); +NVDIMM_EVENT_ATTR(fast_w_cnt, FAST_W_CNT); + +static struct attribute *nvdimm_events_attr[] = { + NVDIMM_EVENT_PTR(CTL_RES_CNT), + NVDIMM_EVENT_PTR(CTL_RES_TM), + NVDIMM_EVENT_PTR(POWERON_SECS), + NVDIMM_EVENT_PTR(MEM_LIFE), + NVDIMM_EVENT_PTR(CRI_RES_UTIL), + NVDIMM_EVENT_PTR(HOST_L_CNT), + NVDIMM_EVENT_PTR(HOST_S_CNT), + NVDIMM_EVENT_PTR(HOST_S_DUR), + NVDIMM_EVENT_PTR(HOST_L_DUR), + NVDIMM_EVENT_PTR(MED_R_CNT), + NVDIMM_EVENT_PTR(MED_W_CNT), + NVDIMM_EVENT_PTR(MED_R_DUR), + NVDIMM_EVENT_PTR(MED_W_DUR), + NVDIMM_EVENT_PTR(CACHE_RH_CNT), + NVDIMM_EVENT_PTR(CACHE_WH_CNT), + NVDIMM_EVENT_PTR(FAST_W_CNT), + NULL +}; + +static struct attribute_group nvdimm_pmu_events_group = { + .name = "events", + .attrs = nvdimm_events_attr, +}; + +PMU_FORMAT_ATTR(event, "config:0-4"); + +static struct attribute *nvdimm_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group nvdimm_pmu_format_group = { + .name = "format", + .attrs = nvdimm_pmu_format_attr, +}; + +ssize_t nvdimm_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +static ssize_t nvdimm_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct nvdimm_pmu *nd_pmu; + + nd_pmu = container_of(pmu, struct nvdimm_pmu, pmu); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(nd_pmu->cpu)); +} + +static int nvdimm_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) +{ + struct nvdimm_pmu *nd_pmu; + u32 target; + int nodeid; + const struct cpumask *cpumask; + + nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node); + + /* Clear it, in case given cpu is set in nd_pmu->arch_cpumask */ + cpumask_test_and_clear_cpu(cpu, &nd_pmu->arch_cpumask); + + /* + * If given cpu is not same as current designated cpu for + * counter access, just
return. + */ + if (cpu != nd_pmu->cpu) + return 0; + + /* Check for any active cpu in nd_pmu->arch_cpumask */ + target = cpumask_any(&nd_pmu->arch_cpumask); + + /* + * In case we don't have any active cpu in nd_pmu->arch_cpumask, + * check in given cpu's numa node list. + */ + if (target >= nr_cpu_ids) { + nodeid = cpu_to_node(cpu); + cpumask = cpumask_of_node(nodeid); + target = cpumask_any_but(cpumask, cpu); + } + nd_pmu->cpu = target; + + /* Migrate nvdimm pmu events to the new target cpu if valid */ + if (target >= 0 && target < nr_cpu_ids) + perf_pmu_migrate_context(&nd_pmu->pmu, cpu, target); + + return 0; +} + +static int nvdimm_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + struct nvdimm_pmu *nd_pmu; + + nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node); + + if (nd_pmu->cpu >= nr_cpu_ids) + nd_pmu->cpu = cpu; + + return 0; +} + +static int create_cpumask_attr_group(struct nvdimm_pmu *nd_pmu) +{ + struct perf_pmu_events_attr *pmu_events_attr; + struct attribute **attrs_group; + struct attribute_group *nvdimm_pmu_cpumask_group; + + pmu_events_attr = kzalloc(sizeof(*pmu_events_attr), GFP_KERNEL); + if (!pmu_events_attr) + return -ENOMEM; + + attrs_group = kzalloc(2 * sizeof(struct attribute *), GFP_KERNEL); + if (!attrs_group) { + kfree(pmu_events_attr); + return -ENOMEM; + } + + /* Allocate memory for cpumask attribute group */ + nvdimm_pmu_cpumask_group = kzalloc(sizeof(*nvdimm_pmu_cpumask_group), GFP_KERNEL); + if (!nvdimm_pmu_cpumask_group) { + kfree(pmu_events_attr); + kfree(attrs_group); + return -ENOMEM; + } + + sysfs_attr_init(&pmu_events_attr->attr.attr); + pmu_events_attr->attr.attr.name = "cpumask"; + pmu_events_attr->attr.attr.mode = 0444; + pmu_events_attr->attr.show = nvdimm_pmu_cpumask_show; + attrs_group[0] = &pmu_events_attr->attr.attr; + attrs_group[1] = NULL; + + nvdimm_pmu_cpumask_group->attrs = attrs_group; + nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR] = nvdimm_pmu_cpumask_group; + return 0; +} + +static int nvdimm_pmu_cpu_hotplug_init(struct nvdimm_pmu *nd_pmu) +{ + int nodeid, rc; + const struct cpumask *cpumask; + + /* + * In case of cpu hotplug feature, arch specific code + * can provide required cpumask which can be used + * to get designated cpu for counter access. + * Check for any active cpu in nd_pmu->arch_cpumask. + */ + if (!cpumask_empty(&nd_pmu->arch_cpumask)) { + nd_pmu->cpu = cpumask_any(&nd_pmu->arch_cpumask); + } else { + /* pick active cpu from the cpumask of device numa node.
*/ + nodeid = dev_to_node(nd_pmu->dev); + cpumask = cpumask_of_node(nodeid); + nd_pmu->cpu = cpumask_any(cpumask); + } + + rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/nvdimm:online", + nvdimm_pmu_cpu_online, nvdimm_pmu_cpu_offline); + + if (rc < 0) + return rc; + + nd_pmu->cpuhp_state = rc; + + /* Register the pmu instance for cpu hotplug */ + rc = cpuhp_state_add_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node); + if (rc) { + cpuhp_remove_multi_state(nd_pmu->cpuhp_state); + return rc; + } + + /* Create cpumask attribute group */ + rc = create_cpumask_attr_group(nd_pmu); + if (rc) { + cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node); + cpuhp_remove_multi_state(nd_pmu->cpuhp_state); + return rc; + } + + return 0; +} + +static void nvdimm_pmu_free_hotplug_memory(struct nvdimm_pmu *nd_pmu) +{ + cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node); + cpuhp_remove_multi_state(nd_pmu->cpuhp_state); + + if (nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]) + kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]->attrs); + kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]); +} + +int register_nvdimm_pmu(struct nvdimm_pmu *nd_pmu, struct platform_device *pdev) +{ + int rc; + + if (!nd_pmu || !pdev) + return -EINVAL; + + /* event functions like add/del/read/event_init and pmu name should not be NULL */ + if (WARN_ON_ONCE(!(nd_pmu->pmu.event_init && nd_pmu->pmu.add && + nd_pmu->pmu.del && nd_pmu->pmu.read && nd_pmu->pmu.name))) + return -EINVAL; + + nd_pmu->pmu.attr_groups = kzalloc((NVDIMM_PMU_NULL_ATTR + 1) * + sizeof(struct attribute_group *), GFP_KERNEL); + if (!nd_pmu->pmu.attr_groups) + return -ENOMEM; + + /* + * Add platform_device->dev pointer to nvdimm_pmu to access + * device data in events functions. 
+ */ + nd_pmu->dev = &pdev->dev; + + /* Fill attribute groups for the nvdimm pmu device */ + nd_pmu->pmu.attr_groups[NVDIMM_PMU_FORMAT_ATTR] = &nvdimm_pmu_format_group; + nd_pmu->pmu.attr_groups[NVDIMM_PMU_EVENT_ATTR] = &nvdimm_pmu_events_group; + nd_pmu->pmu.attr_groups[NVDIMM_PMU_NULL_ATTR] = NULL; + + /* Fill attribute group for cpumask */ + rc = nvdimm_pmu_cpu_hotplug_init(nd_pmu); + if (rc) { + pr_info("cpu hotplug feature failed for device: %s\n", nd_pmu->pmu.name); + kfree(nd_pmu->pmu.attr_groups); + return rc; + } + + rc = perf_pmu_register(&nd_pmu->pmu, nd_pmu->pmu.name, -1); + if (rc) { + kfree(nd_pmu->pmu.attr_groups); + nvdimm_pmu_free_hotplug_memory(nd_pmu); + return rc; + } + + pr_info("%s NVDIMM performance monitor support registered\n", + nd_pmu->pmu.name); + + return 0; +} +EXPORT_SYMBOL_GPL(register_nvdimm_pmu); + +void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) +{ + perf_pmu_unregister(&nd_pmu->pmu); + nvdimm_pmu_free_hotplug_memory(nd_pmu); + kfree(nd_pmu); +} +EXPORT_SYMBOL_GPL(unregister_nvdimm_pmu); diff --git a/include/linux/nd.h b/include/linux/nd.h index ad186e828263..4813c7089e5c 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -9,6 +9,7 @@ #include #include #include +#include enum nvdimm_event { NVDIMM_REVALIDATE_POISON, @@ -24,6 +25,19 @@ enum nvdimm_claim_class { NVDIMM_CCLASS_UNKNOWN, }; +#define NVDIMM_EVENT_VAR(_id) event_attr_##_id +#define NVDIMM_EVENT_PTR(_id) (&event_attr_##_id.attr.attr) + +#define NVDIMM_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id, \ + nvdimm_events_sysfs_show) + +/* Event attribute array index */ +#define NVDIMM_PMU_FORMAT_ATTR 0 +#define NVDIMM_PMU_EVENT_ATTR 1 +#define NVDIMM_PMU_CPUMASK_ATTR 2 +#define NVDIMM_PMU_NULL_ATTR 3 + /** * struct nvdimm_pmu - data structure for nvdimm perf driver * @pmu: pmu data structure for nvdimm performance stats. @@ -43,6 +57,16 @@ struct nvdimm_pmu { struct cpumask arch_cpumask; }; +extern ssize_t nvdimm_events_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page); + +int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev); +void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu); +void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); +int perf_pmu_register(struct pmu *pmu, const char *name, int type); +void perf_pmu_unregister(struct pmu *pmu); + struct nd_device_driver { struct device_driver drv; unsigned long type; -- cgit v1.2.3-71-gd317 From 013a3e7c79ac51e6cdd84e3580863a562c7597c7 Mon Sep 17 00:00:00 2001 From: Min Li Date: Tue, 8 Mar 2022 09:10:51 -0500 Subject: ptp: idt82p33: use rsmu driver to access i2c/spi bus The rsmu (Renesas Synchronization Management Unit) driver is located in drivers/mfd and is responsible for creating multiple devices, including the idt82p33 phc, which will then use the exposed regmap and mutex handle to access the i2c/spi bus.
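In outline (a simplified sketch of the probe-path change in the diff below, not a separate API; the function name and register offset are illustrative, and struct rsmu_ddata is assumed to be provided by the mfd rsmu header), the phc child driver borrows bus access from its rsmu parent instead of owning the bus itself:

#include <linux/mfd/rsmu.h>	/* assumed location of struct rsmu_ddata */
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int phc_child_probe_sketch(struct platform_device *pdev)
{
	/* the rsmu mfd parent owns the i2c/spi bus and shares its state */
	struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
	u8 val;
	int err;

	mutex_lock(&ddata->lock);	/* serializes all rsmu child devices */
	err = regmap_bulk_read(ddata->regmap, 0x00, &val, sizeof(val));
	mutex_unlock(&ddata->lock);

	return err;
}

This is why the diff below can drop the driver's hand-rolled i2c transfer and page-offset handling in favor of thin regmap_bulk_read()/regmap_bulk_write() wrappers.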
Signed-off-by: Min Li Acked-by: Richard Cochran Link: https://lore.kernel.org/r/1646748651-16811-1-git-send-email-min.li.xe@renesas.com Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_idt82p33.c | 344 ++++++++++----------------------------- drivers/ptp/ptp_idt82p33.h | 151 +++++------------ include/linux/mfd/idt82p33_reg.h | 3 + 3 files changed, 129 insertions(+), 369 deletions(-) (limited to 'include/linux') diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c index c1c959f7e52b..97c1be44e323 100644 --- a/drivers/ptp/ptp_idt82p33.c +++ b/drivers/ptp/ptp_idt82p33.c @@ -6,13 +6,17 @@ #define pr_fmt(fmt) "IDT_82p33xxx: " fmt #include -#include +#include #include #include #include +#include #include #include #include +#include +#include +#include #include "ptp_private.h" #include "ptp_idt82p33.h" @@ -24,15 +28,25 @@ MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FW_FILENAME); /* Module Parameters */ -static u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC; -module_param(sync_tod_timeout, uint, 0); -MODULE_PARM_DESC(sync_tod_timeout, -"duration in second to keep SYNC_TOD on (set to 0 to keep it always on)"); - static u32 phase_snap_threshold = SNAP_THRESHOLD_NS; module_param(phase_snap_threshold, uint, 0); MODULE_PARM_DESC(phase_snap_threshold, -"threshold (150000ns by default) below which adjtime would ignore"); +"threshold (10000ns by default) below which adjtime would use double dco"); + +static char *firmware; +module_param(firmware, charp, 0); + +static inline int idt82p33_read(struct idt82p33 *idt82p33, u16 regaddr, + u8 *buf, u16 count) +{ + return regmap_bulk_read(idt82p33->regmap, regaddr, buf, count); +} + +static inline int idt82p33_write(struct idt82p33 *idt82p33, u16 regaddr, + u8 *buf, u16 count) +{ + return regmap_bulk_write(idt82p33->regmap, regaddr, buf, count); +} static void idt82p33_byte_array_to_timespec(struct timespec64 *ts, u8 buf[TOD_BYTE_COUNT]) @@ -78,110 +92,6 @@ static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts, } } -static int idt82p33_xfer_read(struct idt82p33 *idt82p33, - unsigned char regaddr, - unsigned char *buf, - unsigned int count) -{ - struct i2c_client *client = idt82p33->client; - struct i2c_msg msg[2]; - int cnt; - - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = 1; - msg[0].buf = ®addr; - - msg[1].addr = client->addr; - msg[1].flags = I2C_M_RD; - msg[1].len = count; - msg[1].buf = buf; - - cnt = i2c_transfer(client->adapter, msg, 2); - if (cnt < 0) { - dev_err(&client->dev, "i2c_transfer returned %d\n", cnt); - return cnt; - } else if (cnt != 2) { - dev_err(&client->dev, - "i2c_transfer sent only %d of %d messages\n", cnt, 2); - return -EIO; - } - return 0; -} - -static int idt82p33_xfer_write(struct idt82p33 *idt82p33, - u8 regaddr, - u8 *buf, - u16 count) -{ - struct i2c_client *client = idt82p33->client; - /* we add 1 byte for device register */ - u8 msg[IDT82P33_MAX_WRITE_COUNT + 1]; - int err; - - if (count > IDT82P33_MAX_WRITE_COUNT) - return -EINVAL; - - msg[0] = regaddr; - memcpy(&msg[1], buf, count); - - err = i2c_master_send(client, msg, count + 1); - if (err < 0) { - dev_err(&client->dev, "i2c_master_send returned %d\n", err); - return err; - } - - return 0; -} - -static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val) -{ - int err; - - if (idt82p33->page_offset == val) - return 0; - - err = idt82p33_xfer_write(idt82p33, PAGE_ADDR, &val, sizeof(val)); - if (err) - dev_err(&idt82p33->client->dev, - "failed to set page offset %d\n", val); - else - idt82p33->page_offset = val; - 
- return err; -} - -static int idt82p33_rdwr(struct idt82p33 *idt82p33, unsigned int regaddr, - unsigned char *buf, unsigned int count, bool write) -{ - u8 offset, page; - int err; - - page = _PAGE(regaddr); - offset = _OFFSET(regaddr); - - err = idt82p33_page_offset(idt82p33, page); - if (err) - return err; - - if (write) - return idt82p33_xfer_write(idt82p33, offset, buf, count); - - return idt82p33_xfer_read(idt82p33, offset, buf, count); -} - -static int idt82p33_read(struct idt82p33 *idt82p33, unsigned int regaddr, - unsigned char *buf, unsigned int count) -{ - return idt82p33_rdwr(idt82p33, regaddr, buf, count, false); -} - -static int idt82p33_write(struct idt82p33 *idt82p33, unsigned int regaddr, - unsigned char *buf, unsigned int count) -{ - return idt82p33_rdwr(idt82p33, regaddr, buf, count, true); -} - static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel, enum pll_mode mode) { @@ -206,7 +116,7 @@ static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel, if (err) return err; - channel->pll_mode = dpll_mode; + channel->pll_mode = mode; return 0; } @@ -467,7 +377,7 @@ static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel) err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); return err; } @@ -499,8 +409,8 @@ static int idt82p33_check_and_set_masks(struct idt82p33 *idt82p33, if (page == PLLMASK_ADDR_HI && offset == PLLMASK_ADDR_LO) { if ((val & 0xfc) || !(val & 0x3)) { - dev_err(&idt82p33->client->dev, - "Invalid PLL mask 0x%hhx\n", val); + dev_err(idt82p33->dev, + "Invalid PLL mask 0x%x\n", val); err = -EINVAL; } else { idt82p33->pll_mask = val; @@ -520,14 +430,14 @@ static void idt82p33_display_masks(struct idt82p33 *idt82p33) { u8 mask, i; - dev_info(&idt82p33->client->dev, + dev_info(idt82p33->dev, "pllmask = 0x%02x\n", idt82p33->pll_mask); for (i = 0; i < MAX_PHC_PLL; i++) { mask = 1 << i; if (mask & idt82p33->pll_mask) - dev_info(&idt82p33->client->dev, + dev_info(idt82p33->dev, "PLL%d output_mask = 0x%04x\n", i, idt82p33->channel[i].output_mask); } @@ -539,11 +449,6 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable) u8 sync_cnfg; int err; - /* Turn it off after sync_tod_timeout seconds */ - if (enable && sync_tod_timeout) - ptp_schedule_worker(channel->ptp_clock, - sync_tod_timeout * HZ); - err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg, &sync_cnfg, sizeof(sync_cnfg)); if (err) @@ -557,22 +462,6 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable) &sync_cnfg, sizeof(sync_cnfg)); } -static long idt82p33_sync_tod_work_handler(struct ptp_clock_info *ptp) -{ - struct idt82p33_channel *channel = - container_of(ptp, struct idt82p33_channel, caps); - struct idt82p33 *idt82p33 = channel->idt82p33; - - mutex_lock(&idt82p33->reg_lock); - - (void)idt82p33_sync_tod(channel, false); - - mutex_unlock(&idt82p33->reg_lock); - - /* Return a negative value here to not reschedule */ - return -1; -} - static int idt82p33_output_enable(struct idt82p33_channel *channel, bool enable, unsigned int outn) { @@ -634,18 +523,11 @@ static int idt82p33_enable_tod(struct idt82p33_channel *channel) struct idt82p33 *idt82p33 = channel->idt82p33; struct timespec64 ts = {0, 0}; int err; - u8 val; - - val = 0; - err = idt82p33_write(idt82p33, channel->dpll_input_mode_cnfg, - &val, sizeof(val)); - if (err) - return err; err = idt82p33_measure_tod_write_overhead(channel); 
if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); return err; } @@ -673,16 +555,14 @@ static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33) } static int idt82p33_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) + struct ptp_clock_request *rq, int on) { struct idt82p33_channel *channel = container_of(ptp, struct idt82p33_channel, caps); struct idt82p33 *idt82p33 = channel->idt82p33; - int err; - - err = -EOPNOTSUPP; + int err = -EOPNOTSUPP; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); if (rq->type == PTP_CLK_REQ_PEROUT) { if (!on) @@ -690,15 +570,18 @@ static int idt82p33_enable(struct ptp_clock_info *ptp, &rq->perout); /* Only accept a 1-PPS aligned to the second. */ else if (rq->perout.start.nsec || rq->perout.period.sec != 1 || - rq->perout.period.nsec) { + rq->perout.period.nsec) err = -ERANGE; - } else + else err = idt82p33_perout_enable(channel, true, &rq->perout); } - mutex_unlock(&idt82p33->reg_lock); + mutex_unlock(idt82p33->lock); + if (err) + dev_err(idt82p33->dev, + "Failed in %s with err %d!\n", __func__, err); return err; } @@ -727,11 +610,11 @@ static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns) val[3] = (offset_regval >> 24) & 0x1F; val[3] |= PH_OFFSET_EN; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = idt82p33_dpll_set_mode(channel, PLL_MODE_WPH); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); goto out; } @@ -740,7 +623,7 @@ static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns) sizeof(val)); out: - mutex_unlock(&idt82p33->reg_lock); + mutex_unlock(idt82p33->lock); return err; } @@ -751,12 +634,12 @@ static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) struct idt82p33 *idt82p33 = channel->idt82p33; int err; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = _idt82p33_adjfine(channel, scaled_ppm); + mutex_unlock(idt82p33->lock); if (err) - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); - mutex_unlock(&idt82p33->reg_lock); return err; } @@ -768,29 +651,20 @@ static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns) struct idt82p33 *idt82p33 = channel->idt82p33; int err; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); if (abs(delta_ns) < phase_snap_threshold) { - mutex_unlock(&idt82p33->reg_lock); + mutex_unlock(idt82p33->lock); return 0; } err = _idt82p33_adjtime(channel, delta_ns); - if (err) { - mutex_unlock(&idt82p33->reg_lock); - dev_err(&idt82p33->client->dev, - "Adjtime failed in %s with err %d!\n", __func__, err); - return err; - } + mutex_unlock(idt82p33->lock); - err = idt82p33_sync_tod(channel, true); if (err) - dev_err(&idt82p33->client->dev, - "Sync_tod failed in %s with err %d!\n", __func__, err); - - mutex_unlock(&idt82p33->reg_lock); - + dev_err(idt82p33->dev, + "Failed in %s with err %d!\n", __func__, err); return err; } @@ -801,31 +675,31 @@ static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) struct idt82p33 *idt82p33 = channel->idt82p33; int err; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = _idt82p33_gettime(channel, ts); + mutex_unlock(idt82p33->lock); + if (err) - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); - mutex_unlock(&idt82p33->reg_lock); - return 
err; } static int idt82p33_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) + const struct timespec64 *ts) { struct idt82p33_channel *channel = container_of(ptp, struct idt82p33_channel, caps); struct idt82p33 *idt82p33 = channel->idt82p33; int err; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = _idt82p33_settime(channel, ts); + mutex_unlock(idt82p33->lock); + if (err) - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); - mutex_unlock(&idt82p33->reg_lock); - return err; } @@ -864,7 +738,7 @@ static int idt82p33_channel_init(struct idt82p33_channel *channel, int index) static void idt82p33_caps_init(struct ptp_clock_info *caps) { caps->owner = THIS_MODULE; - caps->max_adj = 92000; + caps->max_adj = DCO_MAX_PPB; caps->n_per_out = 11; caps->adjphase = idt82p33_adjwritephase; caps->adjfine = idt82p33_adjfine; @@ -872,7 +746,6 @@ static void idt82p33_caps_init(struct ptp_clock_info *caps) caps->gettime64 = idt82p33_gettime; caps->settime64 = idt82p33_settime; caps->enable = idt82p33_enable; - caps->do_aux_work = idt82p33_sync_tod_work_handler; } static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index) @@ -887,7 +760,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index) err = idt82p33_channel_init(channel, index); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Channel_init failed in %s with err %d!\n", __func__, err); return err; @@ -912,7 +785,7 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index) err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Dpll_set_mode failed in %s with err %d!\n", __func__, err); return err; @@ -920,13 +793,13 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index) err = idt82p33_enable_tod(channel); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Enable_tod failed in %s with err %d!\n", __func__, err); return err; } - dev_info(&idt82p33->client->dev, "PLL%d registered as ptp%d\n", + dev_info(idt82p33->dev, "PLL%d registered as ptp%d\n", index, channel->ptp_clock->index); return 0; @@ -940,25 +813,24 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33) int err; s32 len; - dev_dbg(&idt82p33->client->dev, - "requesting firmware '%s'\n", FW_FILENAME); + dev_dbg(idt82p33->dev, "requesting firmware '%s'\n", FW_FILENAME); - err = request_firmware(&fw, FW_FILENAME, &idt82p33->client->dev); + err = request_firmware(&fw, FW_FILENAME, idt82p33->dev); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); return err; } - dev_dbg(&idt82p33->client->dev, "firmware size %zu bytes\n", fw->size); + dev_dbg(idt82p33->dev, "firmware size %zu bytes\n", fw->size); rec = (struct idt82p33_fwrc *) fw->data; for (len = fw->size; len > 0; len -= sizeof(*rec)) { if (rec->reserved) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "bad firmware, reserved field non-zero\n"); err = -EINVAL; } else { @@ -973,16 +845,11 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33) } if (err == 0) { - /* maximum 8 pages */ - if (page >= PAGE_NUM) - continue; - /* Page size 128, last 4 bytes of page skipped */ - if (((loaddr > 0x7b) && (loaddr <= 0x7f)) - || loaddr > 0xfb) + if (loaddr > 0x7b) continue; - err = idt82p33_write(idt82p33, _ADDR(page, loaddr), + err = idt82p33_write(idt82p33, REG_ADDR(page, loaddr), &val, 
sizeof(val)); } @@ -997,36 +864,34 @@ out: } -static int idt82p33_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int idt82p33_probe(struct platform_device *pdev) { + struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent); struct idt82p33 *idt82p33; int err; u8 i; - (void)id; - - idt82p33 = devm_kzalloc(&client->dev, + idt82p33 = devm_kzalloc(&pdev->dev, sizeof(struct idt82p33), GFP_KERNEL); if (!idt82p33) return -ENOMEM; - mutex_init(&idt82p33->reg_lock); - - idt82p33->client = client; - idt82p33->page_offset = 0xff; + idt82p33->dev = &pdev->dev; + idt82p33->mfd = pdev->dev.parent; + idt82p33->lock = &ddata->lock; + idt82p33->regmap = ddata->regmap; idt82p33->tod_write_overhead_ns = 0; idt82p33->calculate_overhead_flag = 0; idt82p33->pll_mask = DEFAULT_PLL_MASK; idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0; idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1; - mutex_lock(&idt82p33->reg_lock); + mutex_lock(idt82p33->lock); err = idt82p33_load_firmware(idt82p33); if (err) - dev_warn(&idt82p33->client->dev, + dev_warn(idt82p33->dev, "loading firmware failed with %d\n", err); if (idt82p33->pll_mask) { @@ -1034,7 +899,7 @@ static int idt82p33_probe(struct i2c_client *client, if (idt82p33->pll_mask & (1 << i)) { err = idt82p33_enable_channel(idt82p33, i); if (err) { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "Failed in %s with err %d!\n", __func__, err); break; @@ -1042,69 +907,38 @@ static int idt82p33_probe(struct i2c_client *client, } } } else { - dev_err(&idt82p33->client->dev, + dev_err(idt82p33->dev, "no PLLs flagged as PHCs, nothing to do\n"); err = -ENODEV; } - mutex_unlock(&idt82p33->reg_lock); + mutex_unlock(idt82p33->lock); if (err) { idt82p33_ptp_clock_unregister_all(idt82p33); return err; } - i2c_set_clientdata(client, idt82p33); + platform_set_drvdata(pdev, idt82p33); return 0; } -static int idt82p33_remove(struct i2c_client *client) +static int idt82p33_remove(struct platform_device *pdev) { - struct idt82p33 *idt82p33 = i2c_get_clientdata(client); + struct idt82p33 *idt82p33 = platform_get_drvdata(pdev); idt82p33_ptp_clock_unregister_all(idt82p33); - mutex_destroy(&idt82p33->reg_lock); return 0; } -#ifdef CONFIG_OF -static const struct of_device_id idt82p33_dt_id[] = { - { .compatible = "idt,82p33810" }, - { .compatible = "idt,82p33813" }, - { .compatible = "idt,82p33814" }, - { .compatible = "idt,82p33831" }, - { .compatible = "idt,82p33910" }, - { .compatible = "idt,82p33913" }, - { .compatible = "idt,82p33914" }, - { .compatible = "idt,82p33931" }, - {}, -}; -MODULE_DEVICE_TABLE(of, idt82p33_dt_id); -#endif - -static const struct i2c_device_id idt82p33_i2c_id[] = { - { "idt82p33810", }, - { "idt82p33813", }, - { "idt82p33814", }, - { "idt82p33831", }, - { "idt82p33910", }, - { "idt82p33913", }, - { "idt82p33914", }, - { "idt82p33931", }, - {}, -}; -MODULE_DEVICE_TABLE(i2c, idt82p33_i2c_id); - -static struct i2c_driver idt82p33_driver = { +static struct platform_driver idt82p33_driver = { .driver = { - .of_match_table = of_match_ptr(idt82p33_dt_id), - .name = "idt82p33", + .name = "82p33x1x-phc", }, - .probe = idt82p33_probe, - .remove = idt82p33_remove, - .id_table = idt82p33_i2c_id, + .probe = idt82p33_probe, + .remove = idt82p33_remove, }; -module_i2c_driver(idt82p33_driver); +module_platform_driver(idt82p33_driver); diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h index 1c7a0f0872e8..0ea1c35c0f9f 100644 --- a/drivers/ptp/ptp_idt82p33.h +++ b/drivers/ptp/ptp_idt82p33.h @@ -8,94 
+8,19 @@ #define PTP_IDT82P33_H #include -#include +#include +#include - -/* Register Map - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf */ -#define PAGE_NUM (8) -#define _ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f)) -#define _PAGE(addr) (((addr) >> 0x7) & 0x7) -#define _OFFSET(addr) ((addr) & 0x7f) - -#define DPLL1_TOD_CNFG 0x134 -#define DPLL2_TOD_CNFG 0x1B4 - -#define DPLL1_TOD_STS 0x10B -#define DPLL2_TOD_STS 0x18B - -#define DPLL1_TOD_TRIGGER 0x115 -#define DPLL2_TOD_TRIGGER 0x195 - -#define DPLL1_OPERATING_MODE_CNFG 0x120 -#define DPLL2_OPERATING_MODE_CNFG 0x1A0 - -#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C -#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC - -#define DPLL1_PHASE_OFFSET_CNFG 0x143 -#define DPLL2_PHASE_OFFSET_CNFG 0x1C3 - -#define DPLL1_SYNC_EDGE_CNFG 0X140 -#define DPLL2_SYNC_EDGE_CNFG 0X1C0 - -#define DPLL1_INPUT_MODE_CNFG 0X116 -#define DPLL2_INPUT_MODE_CNFG 0X196 - -#define OUT_MUX_CNFG(outn) _ADDR(0x6, (0xC * (outn))) - -#define PAGE_ADDR 0x7F -/* Register Map end */ - -/* Register definitions - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf*/ -#define TOD_TRIGGER(wr_trig, rd_trig) ((wr_trig & 0xf) << 4 | (rd_trig & 0xf)) -#define SYNC_TOD BIT(1) -#define PH_OFFSET_EN BIT(7) -#define SQUELCH_ENABLE BIT(5) - -/* Bit definitions for the DPLL_MODE register */ -#define PLL_MODE_SHIFT (0) -#define PLL_MODE_MASK (0x1F) - -#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef) - -enum pll_mode { - PLL_MODE_MIN = 0, - PLL_MODE_AUTOMATIC = PLL_MODE_MIN, - PLL_MODE_FORCE_FREERUN = 1, - PLL_MODE_FORCE_HOLDOVER = 2, - PLL_MODE_FORCE_LOCKED = 4, - PLL_MODE_FORCE_PRE_LOCKED2 = 5, - PLL_MODE_FORCE_PRE_LOCKED = 6, - PLL_MODE_FORCE_LOST_PHASE = 7, - PLL_MODE_DCO = 10, - PLL_MODE_WPH = 18, - PLL_MODE_MAX = PLL_MODE_WPH, -}; - -enum hw_tod_trig_sel { - HW_TOD_TRIG_SEL_MIN = 0, - HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN, - HW_TOD_TRIG_SEL_SYNC_SEL = 1, - HW_TOD_TRIG_SEL_IN12 = 2, - HW_TOD_TRIG_SEL_IN13 = 3, - HW_TOD_TRIG_SEL_IN14 = 4, - HW_TOD_TRIG_SEL_TOD_PPS = 5, - HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6, - HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7, - HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8, - HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9, - HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG, - WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG, -}; - -/* Register bit definitions end */ #define FW_FILENAME "idt82p33xxx.bin" -#define MAX_PHC_PLL (2) -#define TOD_BYTE_COUNT (10) -#define MAX_MEASURMENT_COUNT (5) -#define SNAP_THRESHOLD_NS (150000) -#define SYNC_TOD_TIMEOUT_SEC (5) -#define IDT82P33_MAX_WRITE_COUNT (512) +#define MAX_PHC_PLL (2) +#define TOD_BYTE_COUNT (10) +#define DCO_MAX_PPB (92000) +#define MAX_MEASURMENT_COUNT (5) +#define SNAP_THRESHOLD_NS (10000) +#define IMMEDIATE_SNAP_THRESHOLD_NS (50000) +#define DDCO_THRESHOLD_NS (5) +#define IDT82P33_MAX_WRITE_COUNT (512) +#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef) #define PLLMASK_ADDR_HI 0xFF #define PLLMASK_ADDR_LO 0xA5 @@ -116,15 +41,25 @@ enum hw_tod_trig_sel { #define DEFAULT_OUTPUT_MASK_PLL0 (0xc0) #define DEFAULT_OUTPUT_MASK_PLL1 DEFAULT_OUTPUT_MASK_PLL0 +/** + * @brief Maximum absolute value for write phase offset in femtoseconds + */ +#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll) + +/** @brief Phase offset resolution + * + * DPLL phase offset = 10^15 fs / ( System Clock * 2^13) + * = 10^15 fs / ( 1638400000 * 2^13) + * = 74.5058059692382 fs + */ +#define IDT_T0DPLL_PHASE_RESOL 74506 + /* PTP Hardware Clock interface */ struct idt82p33_channel { struct ptp_clock_info caps; struct ptp_clock *ptp_clock; - struct
idt82p33 *idt82p33; - enum pll_mode pll_mode; - /* task to turn off SYNC_TOD bit after pps sync */ - struct delayed_work sync_tod_work; - bool sync_tod_on; + struct idt82p33 *idt82p33; + enum pll_mode pll_mode; s32 current_freq_ppb; u8 output_mask; u16 dpll_tod_cnfg; @@ -138,15 +73,17 @@ struct idt82p33_channel { }; struct idt82p33 { - struct idt82p33_channel channel[MAX_PHC_PLL]; - struct i2c_client *client; - u8 page_offset; - u8 pll_mask; - ktime_t start_time; - int calculate_overhead_flag; - s64 tod_write_overhead_ns; - /* Protects I2C read/modify/write registers from concurrent access */ - struct mutex reg_lock; + struct idt82p33_channel channel[MAX_PHC_PLL]; + struct device *dev; + u8 pll_mask; + /* Mutex to protect operations from being interrupted */ + struct mutex *lock; + struct regmap *regmap; + struct device *mfd; + /* Overhead calculation for adjtime */ + ktime_t start_time; + int calculate_overhead_flag; + s64 tod_write_overhead_ns; }; /* firmware interface */ @@ -157,18 +94,4 @@ struct idt82p33_fwrc { u8 reserved; } __packed; -/** - * @brief Maximum absolute value for write phase offset in femtoseconds - */ -#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll) - -/** @brief Phase offset resolution - * - * DPLL phase offset = 10^15 fs / ( System Clock * 2^13) - * = 10^15 fs / ( 1638400000 * 2^23) - * = 74.5058059692382 fs - */ -#define IDT_T0DPLL_PHASE_RESOL 74506 - - #endif /* PTP_IDT82P33_H */ diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h index 129a6c078221..1db532feeb91 100644 --- a/include/linux/mfd/idt82p33_reg.h +++ b/include/linux/mfd/idt82p33_reg.h @@ -7,6 +7,8 @@ #ifndef HAVE_IDT82P33_REG #define HAVE_IDT82P33_REG +#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f)) + /* Register address */ #define DPLL1_TOD_CNFG 0x134 #define DPLL2_TOD_CNFG 0x1B4 @@ -41,6 +43,7 @@ #define REG_SOFT_RESET 0X381 #define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn))) +#define TOD_TRIGGER(wr_trig, rd_trig) ((wr_trig & 0xf) << 4 | (rd_trig & 0xf)) /* Register bit definitions */ #define SYNC_TOD BIT(1) -- cgit v1.2.3-71-gd317 From 8ddde07a3d285a0f3cec14924446608320fdc013 Mon Sep 17 00:00:00 2001 From: Tian Tao Date: Tue, 8 Mar 2022 16:59:10 +0800 Subject: dma-mapping: benchmark: extract a common header file for map_benchmark definition kernel/dma/map_benchmark.c and selftests/dma/dma_map_benchmark.c have duplicate map_benchmark definitions, which tends to lead to inconsistent changes to map_benchmark on both sides, extract a common header file to avoid this problem. Signed-off-by: Tian Tao Acked-by: Barry Song Reviewed-by: Shuah Khan Signed-off-by: Christoph Hellwig --- include/linux/map_benchmark.h | 31 +++++++++++++++++++++++++ kernel/dma/map_benchmark.c | 25 +------------------- tools/testing/selftests/dma/dma_map_benchmark.c | 25 +------------------- 3 files changed, 33 insertions(+), 48 deletions(-) create mode 100644 include/linux/map_benchmark.h (limited to 'include/linux') diff --git a/include/linux/map_benchmark.h b/include/linux/map_benchmark.h new file mode 100644 index 000000000000..62674c83bde4 --- /dev/null +++ b/include/linux/map_benchmark.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 HiSilicon Limited. 
+ */ + +#ifndef _KERNEL_DMA_BENCHMARK_H +#define _KERNEL_DMA_BENCHMARK_H + +#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark) +#define DMA_MAP_MAX_THREADS 1024 +#define DMA_MAP_MAX_SECONDS 300 +#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC) + +#define DMA_MAP_BIDIRECTIONAL 0 +#define DMA_MAP_TO_DEVICE 1 +#define DMA_MAP_FROM_DEVICE 2 + +struct map_benchmark { + __u64 avg_map_100ns; /* average map latency in 100ns */ + __u64 map_stddev; /* standard deviation of map latency */ + __u64 avg_unmap_100ns; /* as above */ + __u64 unmap_stddev; + __u32 threads; /* how many threads will do map/unmap in parallel */ + __u32 seconds; /* how long the test will last */ + __s32 node; /* which numa node this benchmark will run on */ + __u32 dma_bits; /* DMA addressing capability */ + __u32 dma_dir; /* DMA data direction */ + __u32 dma_trans_ns; /* time for DMA transmission in ns */ + __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ +}; +#endif /* _KERNEL_DMA_BENCHMARK_H */ diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c index 9b9af1bd6be3..0520a8f4fb1d 100644 --- a/kernel/dma/map_benchmark.c +++ b/kernel/dma/map_benchmark.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -18,30 +19,6 @@ #include #include -#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark) -#define DMA_MAP_MAX_THREADS 1024 -#define DMA_MAP_MAX_SECONDS 300 -#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC) - -#define DMA_MAP_BIDIRECTIONAL 0 -#define DMA_MAP_TO_DEVICE 1 -#define DMA_MAP_FROM_DEVICE 2 - -struct map_benchmark { - __u64 avg_map_100ns; /* average map latency in 100ns */ - __u64 map_stddev; /* standard deviation of map latency */ - __u64 avg_unmap_100ns; /* as above */ - __u64 unmap_stddev; - __u32 threads; /* how many threads will do map/unmap in parallel */ - __u32 seconds; /* how long the test will last */ - __s32 node; /* which numa node this benchmark will run on */ - __u32 dma_bits; /* DMA addressing capability */ - __u32 dma_dir; /* DMA data direction */ - __u32 dma_trans_ns; /* time for DMA transmission in ns */ - __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ - __u8 expansion[76]; /* For future use */ -}; - struct map_benchmark_data { struct map_benchmark bparam; struct device *dev; diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c index 485dff51bad2..c3b3c09e995e 100644 --- a/tools/testing/selftests/dma/dma_map_benchmark.c +++ b/tools/testing/selftests/dma/dma_map_benchmark.c @@ -10,40 +10,17 @@ #include #include #include +#include #include #define NSEC_PER_MSEC 1000000L -#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark) -#define DMA_MAP_MAX_THREADS 1024 -#define DMA_MAP_MAX_SECONDS 300 -#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC) - -#define DMA_MAP_BIDIRECTIONAL 0 -#define DMA_MAP_TO_DEVICE 1 -#define DMA_MAP_FROM_DEVICE 2 - static char *directions[] = { "BIDIRECTIONAL", "TO_DEVICE", "FROM_DEVICE", }; -struct map_benchmark { - __u64 avg_map_100ns; /* average map latency in 100ns */ - __u64 map_stddev; /* standard deviation of map latency */ - __u64 avg_unmap_100ns; /* as above */ - __u64 unmap_stddev; - __u32 threads; /* how many threads will do map/unmap in parallel */ - __u32 seconds; /* how long the test will last */ - __s32 node; /* which numa node this benchmark will run on */ - __u32 dma_bits; /* DMA addressing capability */ - __u32 dma_dir; /* DMA data direction */ - __u32 dma_trans_ns; /* time for 
DMA transmission in ns */ - __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ - __u8 expansion[76]; /* For future use */ -}; - int main(int argc, char **argv) { struct map_benchmark map; -- cgit v1.2.3-71-gd317 From e7a6c00dc77aedf27a601738ea509f1caea6d673 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 4 Mar 2022 08:22:22 -0700 Subject: io_uring: add support for registering ring file descriptors Lots of workloads use multiple threads, in which case the file table is shared between them. This makes getting and putting the ring file descriptor for each io_uring_enter(2) system call more expensive, as it involves an atomic get and put for each call. Similarly to how we allow registering normal file descriptors to avoid this overhead, add support for an io_uring_register(2) API that allows registering the ring fds themselves: 1) IORING_REGISTER_RING_FDS - takes an array of io_uring_rsrc_update structs, and registers them with the task. 2) IORING_UNREGISTER_RING_FDS - takes an array of io_uring_rsrc_update structs, and unregisters them. When a ring fd is registered, it is internally represented by an offset. This offset is returned to the application, and the application then uses this offset and sets IORING_ENTER_REGISTERED_RING for the io_uring_enter(2) system call. This works just like using a registered file descriptor, rather than a real one, in an SQE, where IOSQE_FIXED_FILE gets set to tell io_uring that we're using an internal offset/descriptor rather than a real file descriptor. In initial testing, this provides a nice bump in performance for threaded applications in real world cases where the batch count (e.g. number of requests submitted per io_uring_enter(2) invocation) is low. In a microbenchmark, submitting NOP requests, we see the following increases in performance: Requests per syscall Baseline Registered Increase ---------------------------------------------------------------- 1 ~7030K ~8080K +15% 2 ~13120K ~14800K +13% 4 ~22740K ~25300K +11% Co-developed-by: Xiaoguang Wang Signed-off-by: Jens Axboe --- fs/io_uring.c | 182 ++++++++++++++++++++++++++++++++++++++++-- include/linux/io_uring.h | 5 +- include/uapi/linux/io_uring.h | 13 ++- 3 files changed, 190 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index f8fd3b2bb30d..daa3609b742f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -466,6 +466,11 @@ struct io_ring_ctx { }; }; +/* + * Arbitrary limit, can be raised if need be + */ +#define IO_RINGFD_REG_MAX 16 + struct io_uring_task { /* submission side */ int cached_refs; @@ -481,6 +486,7 @@ struct io_uring_task { struct io_wq_work_list task_list; struct io_wq_work_list prior_task_list; struct callback_head task_work; + struct file **registered_rings; bool task_running; }; @@ -8788,8 +8794,16 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task, if (unlikely(!tctx)) return -ENOMEM; + tctx->registered_rings = kcalloc(IO_RINGFD_REG_MAX, + sizeof(struct file *), GFP_KERNEL); + if (unlikely(!tctx->registered_rings)) { + kfree(tctx); + return -ENOMEM; + } + ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL); if (unlikely(ret)) { + kfree(tctx->registered_rings); kfree(tctx); return ret; } @@ -8798,6 +8812,7 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task, if (IS_ERR(tctx->io_wq)) { ret = PTR_ERR(tctx->io_wq); percpu_counter_destroy(&tctx->inflight); + kfree(tctx->registered_rings); kfree(tctx); return ret; } @@ -8822,6 +8837,7 @@ void
__io_uring_free(struct task_struct *tsk) WARN_ON_ONCE(tctx->io_wq); WARN_ON_ONCE(tctx->cached_refs); + kfree(tctx->registered_rings); percpu_counter_destroy(&tctx->inflight); kfree(tctx); tsk->io_uring = NULL; @@ -10043,6 +10059,139 @@ void __io_uring_cancel(bool cancel_all) io_uring_cancel_generic(cancel_all, NULL); } +void io_uring_unreg_ringfd(void) +{ + struct io_uring_task *tctx = current->io_uring; + int i; + + for (i = 0; i < IO_RINGFD_REG_MAX; i++) { + if (tctx->registered_rings[i]) { + fput(tctx->registered_rings[i]); + tctx->registered_rings[i] = NULL; + } + } +} + +static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd, + int start, int end) +{ + struct file *file; + int offset; + + for (offset = start; offset < end; offset++) { + offset = array_index_nospec(offset, IO_RINGFD_REG_MAX); + if (tctx->registered_rings[offset]) + continue; + + file = fget(fd); + if (!file) { + return -EBADF; + } else if (file->f_op != &io_uring_fops) { + fput(file); + return -EOPNOTSUPP; + } + tctx->registered_rings[offset] = file; + return offset; + } + + return -EBUSY; +} + +/* + * Register a ring fd to avoid fdget/fdput for each io_uring_enter() + * invocation. User passes in an array of struct io_uring_rsrc_update + * with ->data set to the ring_fd, and ->offset given for the desired + * index. If no index is desired, application may set ->offset == -1U + * and we'll find an available index. Returns number of entries + * successfully processed, or < 0 on error if none were processed. + */ +static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg, + unsigned nr_args) +{ + struct io_uring_rsrc_update __user *arg = __arg; + struct io_uring_rsrc_update reg; + struct io_uring_task *tctx; + int ret, i; + + if (!nr_args || nr_args > IO_RINGFD_REG_MAX) + return -EINVAL; + + mutex_unlock(&ctx->uring_lock); + ret = io_uring_add_tctx_node(ctx); + mutex_lock(&ctx->uring_lock); + if (ret) + return ret; + + tctx = current->io_uring; + for (i = 0; i < nr_args; i++) { + int start, end; + + if (copy_from_user(®, &arg[i], sizeof(reg))) { + ret = -EFAULT; + break; + } + + if (reg.offset == -1U) { + start = 0; + end = IO_RINGFD_REG_MAX; + } else { + if (reg.offset >= IO_RINGFD_REG_MAX) { + ret = -EINVAL; + break; + } + start = reg.offset; + end = start + 1; + } + + ret = io_ring_add_registered_fd(tctx, reg.data, start, end); + if (ret < 0) + break; + + reg.offset = ret; + if (copy_to_user(&arg[i], ®, sizeof(reg))) { + fput(tctx->registered_rings[reg.offset]); + tctx->registered_rings[reg.offset] = NULL; + ret = -EFAULT; + break; + } + } + + return i ? i : ret; +} + +static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg, + unsigned nr_args) +{ + struct io_uring_rsrc_update __user *arg = __arg; + struct io_uring_task *tctx = current->io_uring; + struct io_uring_rsrc_update reg; + int ret = 0, i; + + if (!nr_args || nr_args > IO_RINGFD_REG_MAX) + return -EINVAL; + if (!tctx) + return 0; + + for (i = 0; i < nr_args; i++) { + if (copy_from_user(®, &arg[i], sizeof(reg))) { + ret = -EFAULT; + break; + } + if (reg.offset >= IO_RINGFD_REG_MAX) { + ret = -EINVAL; + break; + } + + reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX); + if (tctx->registered_rings[reg.offset]) { + fput(tctx->registered_rings[reg.offset]); + tctx->registered_rings[reg.offset] = NULL; + } + } + + return i ? 
i : ret; +} + static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff, size_t sz) { @@ -10173,12 +10322,28 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, io_run_task_work(); if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | - IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))) + IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | + IORING_ENTER_REGISTERED_RING))) return -EINVAL; - f = fdget(fd); - if (unlikely(!f.file)) - return -EBADF; + /* + * Ring fd has been registered via IORING_REGISTER_RING_FDS, we + * need only dereference our task private array to find it. + */ + if (flags & IORING_ENTER_REGISTERED_RING) { + struct io_uring_task *tctx = current->io_uring; + + if (!tctx || fd >= IO_RINGFD_REG_MAX) + return -EINVAL; + fd = array_index_nospec(fd, IO_RINGFD_REG_MAX); + f.file = tctx->registered_rings[fd]; + if (unlikely(!f.file)) + return -EBADF; + } else { + f = fdget(fd); + if (unlikely(!f.file)) + return -EBADF; + } ret = -EOPNOTSUPP; if (unlikely(f.file->f_op != &io_uring_fops)) @@ -10252,7 +10417,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, out: percpu_ref_put(&ctx->refs); out_fput: - fdput(f); + if (!(flags & IORING_ENTER_REGISTERED_RING)) + fdput(f); return submitted ? submitted : ret; } @@ -11142,6 +11308,12 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, break; ret = io_register_iowq_max_workers(ctx, arg); break; + case IORING_REGISTER_RING_FDS: + ret = io_ringfd_register(ctx, arg, nr_args); + break; + case IORING_UNREGISTER_RING_FDS: + ret = io_ringfd_unregister(ctx, arg, nr_args); + break; default: ret = -EINVAL; break; diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 649a4d7c241b..1814e698d861 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -9,11 +9,14 @@ struct sock *io_uring_get_socket(struct file *file); void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); +void io_uring_unreg_ringfd(void); static inline void io_uring_files_cancel(void) { - if (current->io_uring) + if (current->io_uring) { + io_uring_unreg_ringfd(); __io_uring_cancel(false); + } } static inline void io_uring_task_cancel(void) { diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 787f491f0d2a..42b2fe84dbcd 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -257,10 +257,11 @@ struct io_cqring_offsets { /* * io_uring_enter(2) flags */ -#define IORING_ENTER_GETEVENTS (1U << 0) -#define IORING_ENTER_SQ_WAKEUP (1U << 1) -#define IORING_ENTER_SQ_WAIT (1U << 2) -#define IORING_ENTER_EXT_ARG (1U << 3) +#define IORING_ENTER_GETEVENTS (1U << 0) +#define IORING_ENTER_SQ_WAKEUP (1U << 1) +#define IORING_ENTER_SQ_WAIT (1U << 2) +#define IORING_ENTER_EXT_ARG (1U << 3) +#define IORING_ENTER_REGISTERED_RING (1U << 4) /* * Passed in for io_uring_setup(2). 
Copied back with updated info on success @@ -325,6 +326,10 @@ enum { /* set/get max number of io-wq workers */ IORING_REGISTER_IOWQ_MAX_WORKERS = 19, + /* register/unregister io_uring fd with the ring */ + IORING_REGISTER_RING_FDS = 20, + IORING_UNREGISTER_RING_FDS = 21, + /* this goes last */ IORING_REGISTER_LAST }; -- cgit v1.2.3-71-gd317 From 382627824afb09cd86192f4b649fca8c5bfe5f40 Mon Sep 17 00:00:00 2001 From: Xiongwei Song Date: Thu, 10 Mar 2022 22:07:00 +0800 Subject: mm: slab: Delete unused SLAB_DEACTIVATED flag Since commit 9855609bde03 ("mm: memcg/slab: use a single set of kmem_caches for all accounted allocations") deleted all SLAB_DEACTIVATED users, therefore this flag is not needed any more, let's delete it. Signed-off-by: Xiongwei Song Acked-by: David Rientjes Reviewed-by: Roman Gushchin Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Vlastimil Babka Link: https://lore.kernel.org/r/20220310140701.87908-2-sxwjean@me.com --- include/linux/slab.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 37bde99b74af..b6b3eed6c7c4 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -117,9 +117,6 @@ #define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U) #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ -/* Slab deactivation flag */ -#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U) - /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * -- cgit v1.2.3-71-gd317 From afcf5441b9ff22ac57244cd45ff102ebc2e32d1a Mon Sep 17 00:00:00 2001 From: Dan Li Date: Wed, 2 Mar 2022 23:43:23 -0800 Subject: arm64: Add gcc Shadow Call Stack support Shadow call stacks will be available in GCC >= 12, this patch makes the corresponding kernel configuration available when compiling the kernel with the gcc. Note that the implementation in GCC is slightly different from Clang. With SCS enabled, functions will only pop x30 once in the epilogue, like: str x30, [x18], #8 stp x29, x30, [sp, #-16]! ...... - ldp x29, x30, [sp], #16 //clang + ldr x29, [sp], #16 //GCC ldr x30, [x18, #-8]! Link: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=ce09ab17ddd21f73ff2caf6eec3b0ee9b0e1a11e Reviewed-by: Nathan Chancellor Reviewed-by: Kees Cook Reviewed-by: Nick Desaulniers Signed-off-by: Dan Li Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220303074323.86282-1-ashimida@linux.alibaba.com --- arch/Kconfig | 19 ++++++++++--------- arch/arm64/Kconfig | 2 +- include/linux/compiler-gcc.h | 4 ++++ 3 files changed, 15 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index c5b50bfe31c1..cabfac22f2fb 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -599,21 +599,22 @@ config STACKPROTECTOR_STRONG config ARCH_SUPPORTS_SHADOW_CALL_STACK bool help - An architecture should select this if it supports Clang's Shadow - Call Stack and implements runtime support for shadow stack + An architecture should select this if it supports the compiler's + Shadow Call Stack and implements runtime support for shadow stack switching. 
config SHADOW_CALL_STACK - bool "Clang Shadow Call Stack" - depends on CC_IS_CLANG && ARCH_SUPPORTS_SHADOW_CALL_STACK + bool "Shadow Call Stack" + depends on ARCH_SUPPORTS_SHADOW_CALL_STACK depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER help - This option enables Clang's Shadow Call Stack, which uses a - shadow stack to protect function return addresses from being - overwritten by an attacker. More information can be found in - Clang's documentation: + This option enables the compiler's Shadow Call Stack, which + uses a shadow stack to protect function return addresses from + being overwritten by an attacker. More information can be found + in the compiler's documentation: - https://clang.llvm.org/docs/ShadowCallStack.html + - Clang: https://clang.llvm.org/docs/ShadowCallStack.html + - GCC: https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#Instrumentation-Options Note that security guarantees in the kernel differ from the ones documented for user space. The kernel must store addresses diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b8ab790555c8..a6733b972212 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1239,7 +1239,7 @@ config HW_PERF_EVENTS config ARCH_HAS_FILTER_PGPROT def_bool y -# Supported by clang >= 7.0 +# Supported by clang >= 7.0 or GCC >= 12.0.0 config CC_HAVE_SHADOW_CALL_STACK def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index ccbbd31b3aae..deff5b308470 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -97,6 +97,10 @@ #define KASAN_ABI_VERSION 4 #endif +#ifdef CONFIG_SHADOW_CALL_STACK +#define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) +#endif + #if __has_attribute(__no_sanitize_address__) #define __no_sanitize_address __attribute__((no_sanitize_address)) #else -- cgit v1.2.3-71-gd317 From b7f8dff09827c96032c34a945ee7757e394b5952 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 10 Mar 2022 11:45:58 -0500 Subject: dm: simplify dm_submit_bio_remap interface Remove the from_wq argument from dm_submit_bio_remap(). Eliminates the need for dm_submit_bio_remap() callers to know whether they are calling from a workqueue or from the original dm_submit_bio(). Add map_task to dm_io struct, record the map_task in alloc_io and clear it after all target ->map() calls have completed. Update dm_submit_bio_remap to check if 'current' matches io->map_task rather than rely on the passed 'from_wq' argument. This change really simplifies the chore of porting each DM target to using dm_submit_bio_remap() because there is no longer the risk of programming error by not completely knowing all the different contexts a particular method that calls dm_submit_bio_remap() might be used in.
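
A minimal sketch of the resulting pattern, with the dm structures pared down for illustration (the sketch_ names are hypothetical, not the kernel code itself):

	struct sketch_dm_io {
		struct task_struct *map_task;	/* set in alloc_io(), cleared after ->map() */
		bool start_io_acct;
	};

	static void sketch_submit_bio_remap(struct sketch_dm_io *io)
	{
		if (io->map_task == current) {
			/* Caller is still inside the target's ->map():
			 * defer accounting to the original submission path.
			 */
			io->start_io_acct = true;
		} else {
			/* Called later, e.g. from a target-managed
			 * workqueue: account the remapped I/O now.
			 */
		}
	}
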
Signed-off-by: Mike Snitzer --- drivers/md/dm-core.h | 1 + drivers/md/dm-crypt.c | 6 +++--- drivers/md/dm-delay.c | 2 +- drivers/md/dm-thin.c | 4 ++-- drivers/md/dm.c | 8 ++++---- include/linux/device-mapper.h | 2 +- 6 files changed, 12 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 8cc03c0c262e..8d3d11887343 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -237,6 +237,7 @@ struct dm_io { unsigned long start_time; void *data; struct hlist_node node; + struct task_struct *map_task; spinlock_t endio_lock; struct dm_stats_aux stats_aux; /* last member of dm_target_io is 'struct bio' */ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index ec746cc4b1f8..a1730cc8ae27 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1857,7 +1857,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) return 1; } - dm_submit_bio_remap(io->base_bio, clone, (gfp != CRYPT_MAP_READ_GFP)); + dm_submit_bio_remap(io->base_bio, clone); return 0; } @@ -1883,7 +1883,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io) { struct bio *clone = io->ctx.bio_out; - dm_submit_bio_remap(io->base_bio, clone, true); + dm_submit_bio_remap(io->base_bio, clone); } #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) @@ -1962,7 +1962,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) || test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) { - dm_submit_bio_remap(io->base_bio, clone, true); + dm_submit_bio_remap(io->base_bio, clone); return; } diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index b25b45011b11..9a51bf51a859 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio) while (bio) { n = bio->bi_next; bio->bi_next = NULL; - dm_submit_bio_remap(bio, NULL, true); + dm_submit_bio_remap(bio, NULL); bio = n; } } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ba74bc22ba42..4d25d0e27031 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -755,7 +755,7 @@ static void issue(struct thin_c *tc, struct bio *bio) struct pool *pool = tc->pool; if (!bio_triggers_commit(tc, bio)) { - dm_submit_bio_remap(bio, NULL, true); + dm_submit_bio_remap(bio, NULL); return; } @@ -2383,7 +2383,7 @@ static void process_deferred_bios(struct pool *pool) if (bio->bi_opf & REQ_PREFLUSH) bio_endio(bio); else - dm_submit_bio_remap(bio, NULL, true); + dm_submit_bio_remap(bio, NULL); } } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1c5c9036a20e..c470f54f9193 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -574,6 +574,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) this_cpu_inc(*md->pending_io); io->orig_bio = NULL; io->md = md; + io->map_task = current; spin_lock_init(&io->endio_lock); io->start_time = jiffies; @@ -1189,15 +1190,13 @@ static inline void __dm_submit_bio_remap(struct bio *clone, /* * @clone: clone bio that DM core passed to target's .map function * @tgt_clone: clone of @clone bio that target needs submitted - * @from_wq: caller is a workqueue thread managed by DM target * * Targets should use this interface to submit bios they take * ownership of when returning DM_MAPIO_SUBMITTED. 
* * Target should also enable ti->accounts_remapped_io */ -void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone, - bool from_wq) +void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) { struct dm_target_io *tio = clone_to_tio(clone); struct dm_io *io = tio->io; @@ -1212,7 +1211,7 @@ void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone, * Account io->origin_bio to DM dev on behalf of target * that took ownership of IO with DM_MAPIO_SUBMITTED. */ - if (!from_wq) { + if (io->map_task == current) { /* Still in target's map function */ io->start_io_acct = true; } else { @@ -1568,6 +1567,7 @@ static void dm_split_and_process_bio(struct mapped_device *md, } error = __split_and_process_bio(&ci); + ci.io->map_task = NULL; if (error || !ci.sector_count) goto out; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 70cd9449275a..901ec191250c 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -471,7 +471,7 @@ int dm_suspended(struct dm_target *ti); int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); -void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone, bool from_wq); +void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone); union map_info *dm_get_rq_mapinfo(struct request *rq); #ifdef CONFIG_BLK_DEV_ZONED -- cgit v1.2.3-71-gd317 From a2a591fb76e6f5461dfd04715b69c317e50c43a5 Mon Sep 17 00:00:00 2001 From: Ilkka Koskinen Date: Tue, 8 Mar 2022 18:07:50 -0800 Subject: ACPI: AGDI: Add driver for Arm Generic Diagnostic Dump and Reset device ACPI for Arm Components 1.1 Platform Design Document v1.1 [0] specifies the Arm Generic Diagnostic Device Interface (AGDI). It allows an admin to issue a diagnostic dump and reset via an SDEI event or an interrupt. This patch implements the SDEI path. [0] https://developer.arm.com/documentation/den0093/latest/ Signed-off-by: Ilkka Koskinen Reviewed-by: Russell King (Oracle) Acked-by: Lorenzo Pieralisi Signed-off-by: Rafael J. Wysocki --- drivers/acpi/arm64/Kconfig | 10 ++++ drivers/acpi/arm64/Makefile | 1 + drivers/acpi/arm64/agdi.c | 116 ++++++++++++++++++++++++++++++++++++++++++++ drivers/acpi/bus.c | 2 + include/linux/acpi_agdi.h | 13 +++++ 5 files changed, 142 insertions(+) create mode 100644 drivers/acpi/arm64/agdi.c create mode 100644 include/linux/acpi_agdi.h (limited to 'include/linux') diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index 6dba187f4f2e..d4a72835f328 100644 --- a/drivers/acpi/arm64/Kconfig +++ b/drivers/acpi/arm64/Kconfig @@ -8,3 +8,13 @@ config ACPI_IORT config ACPI_GTDT bool + +config ACPI_AGDI + bool "Arm Generic Diagnostic Dump and Reset Device Interface" + depends on ARM_SDE_INTERFACE + help + Arm Generic Diagnostic Dump and Reset Device Interface (AGDI) is + a standard that enables issuing a non-maskable diagnostic dump and + reset command. + + If set, the kernel parses AGDI table and listens for the command.
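
For reference, a minimal Kconfig fragment that would enable this path on an arm64 ACPI system (illustrative only; the option names come from the patch above and its stated ARM_SDE_INTERFACE dependency):

	CONFIG_ARM_SDE_INTERFACE=y
	CONFIG_ACPI_AGDI=y
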
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 66acbe77f46e..7b9e4045659d 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_ACPI_AGDI) += agdi.o obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-y += dma.o diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c new file mode 100644 index 000000000000..4df337d545b7 --- /dev/null +++ b/drivers/acpi/arm64/agdi.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This file implements handling of + * Arm Generic Diagnostic Dump and Reset Interface table (AGDI) + * + * Copyright (c) 2022, Ampere Computing LLC + */ + +#define pr_fmt(fmt) "ACPI: AGDI: " fmt + +#include +#include +#include +#include +#include + +struct agdi_data { + int sdei_event; +}; + +static int agdi_sdei_handler(u32 sdei_event, struct pt_regs *regs, void *arg) +{ + nmi_panic(regs, "Arm Generic Diagnostic Dump and Reset SDEI event issued"); + return 0; +} + +static int agdi_sdei_probe(struct platform_device *pdev, + struct agdi_data *adata) +{ + int err; + + err = sdei_event_register(adata->sdei_event, agdi_sdei_handler, pdev); + if (err) { + dev_err(&pdev->dev, "Failed to register for SDEI event %d", + adata->sdei_event); + return err; + } + + err = sdei_event_enable(adata->sdei_event); + if (err) { + sdei_event_unregister(adata->sdei_event); + dev_err(&pdev->dev, "Failed to enable event %d\n", + adata->sdei_event); + return err; + } + + return 0; +} + +static int agdi_probe(struct platform_device *pdev) +{ + struct agdi_data *adata = dev_get_platdata(&pdev->dev); + + if (!adata) + return -EINVAL; + + return agdi_sdei_probe(pdev, adata); +} + +static int agdi_remove(struct platform_device *pdev) +{ + struct agdi_data *adata = dev_get_platdata(&pdev->dev); + int err, i; + + err = sdei_event_disable(adata->sdei_event); + if (err) + return err; + + for (i = 0; i < 3; i++) { + err = sdei_event_unregister(adata->sdei_event); + if (err != -EINPROGRESS) + break; + + schedule(); + } + + return err; +} + +static struct platform_driver agdi_driver = { + .driver = { + .name = "agdi", + }, + .probe = agdi_probe, + .remove = agdi_remove, +}; + +void __init acpi_agdi_init(void) +{ + struct acpi_table_agdi *agdi_table; + struct agdi_data pdata; + struct platform_device *pdev; + acpi_status status; + + status = acpi_get_table(ACPI_SIG_AGDI, 0, + (struct acpi_table_header **) &agdi_table); + if (ACPI_FAILURE(status)) + return; + + if (agdi_table->flags & ACPI_AGDI_SIGNALING_MODE) { + pr_warn("Interrupt signaling is not supported"); + goto err_put_table; + } + + pdata.sdei_event = agdi_table->sdei_event; + + pdev = platform_device_register_data(NULL, "agdi", 0, &pdata, sizeof(pdata)); + if (IS_ERR(pdev)) + goto err_put_table; + + if (platform_driver_register(&agdi_driver)) + platform_device_unregister(pdev); + +err_put_table: + acpi_put_table((struct acpi_table_header *)agdi_table); +} diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index cd374210fb9f..747a7f98fc1b 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -26,6 +26,7 @@ #include #include #endif +#include #include #include #include @@ -1341,6 +1342,7 @@ static int __init acpi_init(void) acpi_debugger_init(); acpi_setup_sb_notify_handler(); acpi_viot_init(); + acpi_agdi_init(); return 0; } diff --git a/include/linux/acpi_agdi.h b/include/linux/acpi_agdi.h new file mode 100644 index 000000000000..f477f0b452fa --- /dev/null +++ 
b/include/linux/acpi_agdi.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ACPI_AGDI_H__ +#define __ACPI_AGDI_H__ + +#include + +#ifdef CONFIG_ACPI_AGDI +void __init acpi_agdi_init(void); +#else +static inline void acpi_agdi_init(void) {} +#endif +#endif /* __ACPI_AGDI_H__ */ -- cgit v1.2.3-71-gd317 From 9924fbb51e0ae30b8d7eec7c1c839d74da9678b3 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Thu, 10 Mar 2022 14:54:50 +0000 Subject: arch_topology: obtain cpu capacity using information from CPPC Define topology_init_cpu_capacity_cppc() to use highest performance values from _CPC objects to obtain and set maximum capacity information for each CPU. acpi_cppc_processor_probe() is a good point at which to trigger the initialization of CPU (u-arch) capacity values, as at this point the highest performance values can be obtained from each CPU's _CPC objects. Architectures can therefore use this functionality through arch_init_invariance_cppc(). The performance scale used by CPPC is a unified scale for all CPUs in the system. Therefore, by obtaining the raw highest performance values from the _CPC objects, and normalizing them on the [0, 1024] capacity scale, used by the task scheduler, we obtain the CPU capacity of each CPU. While an ACPI Notify(0x85) could alert about a change in the highest performance value, which should in turn retrigger the CPU capacity computations, this notification is not currently handled by the ACPI processor driver. When supported, a call to arch_init_invariance_cppc() would perform the update. Signed-off-by: Ionela Voinescu Acked-by: Sudeep Holla Tested-by: Valentin Schneider Tested-by: Yicong Yang Signed-off-by: Rafael J. Wysocki --- drivers/base/arch_topology.c | 45 ++++++++++++++++++++++++++++++++++++++++--- include/linux/arch_topology.h | 4 ++++ 2 files changed, 46 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 976154140f0b..1d6636ebaac5 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -339,6 +339,46 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) return !ret; } +#ifdef CONFIG_ACPI_CPPC_LIB +#include + +void topology_init_cpu_capacity_cppc(void) +{ + struct cppc_perf_caps perf_caps; + int cpu; + + if (likely(acpi_disabled || !acpi_cpc_valid())) + return; + + raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity), + GFP_KERNEL); + if (!raw_capacity) + return; + + for_each_possible_cpu(cpu) { + if (!cppc_get_perf_caps(cpu, &perf_caps) && + (perf_caps.highest_perf >= perf_caps.nominal_perf) && + (perf_caps.highest_perf >= perf_caps.lowest_perf)) { + raw_capacity[cpu] = perf_caps.highest_perf; + pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n", + cpu, raw_capacity[cpu]); + continue; + } + + pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu); + pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n"); + goto exit; + } + + topology_normalize_cpu_scale(); + schedule_work(&update_topology_flags_work); + pr_debug("cpu_capacity: cpu_capacity initialization done\n"); + +exit: + free_raw_capacity(); +} +#endif + #ifdef CONFIG_CPU_FREQ static cpumask_var_t cpus_to_visit; static void parsing_done_workfn(struct work_struct *work); @@ -387,9 +427,8 @@ static int __init register_cpufreq_notifier(void) int ret; /* - * on ACPI-based systems we need to use the default cpu capacity - * until we have the necessary code to parse the cpu 
capacity, so - * skip registering cpufreq notifier. + * On ACPI-based systems skip registering cpufreq notifier as cpufreq + * information is not needed for cpu capacity initialization. */ if (!acpi_disabled || !raw_capacity) return -EINVAL; diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index cce6136b300a..58cbe18d825c 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -11,6 +11,10 @@ void topology_normalize_cpu_scale(void); int topology_update_cpu_topology(void); +#ifdef CONFIG_ACPI_CPPC_LIB +void topology_init_cpu_capacity_cppc(void); +#endif + struct device_node; bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); -- cgit v1.2.3-71-gd317 From 19397e8b546d20226153dafe5dce34c4393752c4 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 27 Jan 2022 11:17:32 -0600 Subject: ptrace: Move ptrace_report_syscall into ptrace.h Move ptrace_report_syscall from tracehook.h into ptrace.h where it belongs. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-1-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- include/linux/ptrace.h | 27 +++++++++++++++++++++++++++ include/linux/tracehook.h | 26 -------------------------- 2 files changed, 27 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 8aee2945ff08..91b1074edb4c 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -413,4 +413,31 @@ static inline void user_single_step_report(struct pt_regs *regs) extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); + +/* + * ptrace report for syscall entry and exit looks identical. + */ +static inline int ptrace_report_syscall(unsigned long message) +{ + int ptrace = current->ptrace; + + if (!(ptrace & PT_PTRACED)) + return 0; + + current->ptrace_message = message; + ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } + + current->ptrace_message = 0; + return fatal_signal_pending(current); +} #endif diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 88c007ab5ebc..998bc3863559 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -51,32 +51,6 @@ #include struct linux_binprm; -/* - * ptrace report for syscall entry and exit looks identical. - */ -static inline int ptrace_report_syscall(unsigned long message) -{ - int ptrace = current->ptrace; - - if (!(ptrace & PT_PTRACED)) - return 0; - - current->ptrace_message = message; - ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); - - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. -brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } - - current->ptrace_message = 0; - return fatal_signal_pending(current); -} /** * tracehook_report_syscall_entry - task is about to attempt a system call -- cgit v1.2.3-71-gd317 From 153474ba1a4aed0a7b797b4c2be8c35c7a4e57bd Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Thu, 27 Jan 2022 11:46:37 -0600 Subject: ptrace: Create ptrace_report_syscall_{entry,exit} in ptrace.h Rename tracehook_report_syscall_{entry,exit} to ptrace_report_syscall_{entry,exit} and place them in ptrace.h There is no longer any generic tracehook infractructure so make these ptrace specific functions ptrace specific. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-3-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- arch/Kconfig | 2 +- arch/alpha/kernel/ptrace.c | 5 ++-- arch/arc/kernel/ptrace.c | 5 ++-- arch/arm/kernel/ptrace.c | 5 ++-- arch/arm64/kernel/ptrace.c | 7 +++-- arch/csky/kernel/ptrace.c | 5 ++-- arch/h8300/kernel/ptrace.c | 5 ++-- arch/hexagon/kernel/traps.c | 6 ++--- arch/ia64/kernel/ptrace.c | 4 +-- arch/m68k/kernel/ptrace.c | 6 ++--- arch/microblaze/kernel/ptrace.c | 5 ++-- arch/mips/kernel/ptrace.c | 5 ++-- arch/nds32/include/asm/syscall.h | 2 +- arch/nds32/kernel/ptrace.c | 5 ++-- arch/nios2/kernel/ptrace.c | 5 ++-- arch/openrisc/kernel/ptrace.c | 5 ++-- arch/parisc/kernel/ptrace.c | 7 +++-- arch/powerpc/kernel/ptrace/ptrace.c | 8 +++--- arch/riscv/kernel/ptrace.c | 5 ++-- arch/sh/kernel/ptrace_32.c | 5 ++-- arch/sparc/kernel/ptrace_32.c | 5 ++-- arch/sparc/kernel/ptrace_64.c | 5 ++-- arch/um/kernel/ptrace.c | 5 ++-- arch/xtensa/kernel/ptrace.c | 5 ++-- include/asm-generic/syscall.h | 2 +- include/linux/entry-common.h | 6 ++--- include/linux/ptrace.h | 51 +++++++++++++++++++++++++++++++++++++ include/linux/tracehook.h | 51 ------------------------------------- include/uapi/linux/ptrace.h | 2 +- kernel/entry/common.c | 1 + 30 files changed, 109 insertions(+), 126 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 678a80713b21..a517a949eb1d 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -217,7 +217,7 @@ config TRACE_IRQFLAGS_SUPPORT # asm/syscall.h supplying asm-generic/syscall.h interface # linux/regset.h user_regset interfaces # CORE_DUMP_USE_REGSET #define'd in linux/elf.h -# TIF_SYSCALL_TRACE calls tracehook_report_syscall_{entry,exit} +# TIF_SYSCALL_TRACE calls ptrace_report_syscall_{entry,exit} # TIF_NOTIFY_RESUME calls tracehook_notify_resume() # signal delivery calls tracehook_signal_handler() # diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c index 8c43212ae38e..a1a239ea002d 100644 --- a/arch/alpha/kernel/ptrace.c +++ b/arch/alpha/kernel/ptrace.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -323,7 +322,7 @@ asmlinkage unsigned long syscall_trace_enter(void) unsigned long ret = 0; struct pt_regs *regs = current_pt_regs(); if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(current_pt_regs())) + ptrace_report_syscall_entry(current_pt_regs())) ret = -1UL; audit_syscall_entry(regs->r0, regs->r16, regs->r17, regs->r18, regs->r19); return ret ?: current_pt_regs()->r0; @@ -334,5 +333,5 @@ syscall_trace_leave(void) { audit_syscall_exit(current_pt_regs()); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(current_pt_regs(), 0); + ptrace_report_syscall_exit(current_pt_regs(), 0); } diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c index 883391977fdf..54b419ac8bda 100644 --- a/arch/arc/kernel/ptrace.c +++ b/arch/arc/kernel/ptrace.c @@ -4,7 +4,6 @@ */ #include -#include #include #include #include @@ -258,7 +257,7 @@ long arch_ptrace(struct task_struct *child, long request, asmlinkage int syscall_trace_entry(struct pt_regs *regs) { - if 
(tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) return ULONG_MAX; return regs->r8; @@ -266,5 +265,5 @@ asmlinkage int syscall_trace_entry(struct pt_regs *regs) asmlinkage void syscall_trace_exit(struct pt_regs *regs) { - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index e5aa3237853d..bfe88c6e60d5 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -843,8 +842,8 @@ static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) regs->ARM_ip = dir; if (dir == PTRACE_SYSCALL_EXIT) - tracehook_report_syscall_exit(regs, 0); - else if (tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_exit(regs, 0); + else if (ptrace_report_syscall_entry(regs)) current_thread_info()->abi_syscall = -1; regs->ARM_ip = ip; diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index b7845575f86f..230a47b9189e 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -1818,11 +1817,11 @@ static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) regs->regs[regno] = dir; if (dir == PTRACE_SYSCALL_ENTER) { - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) forget_syscall(regs); regs->regs[regno] = saved_reg; } else if (!test_thread_flag(TIF_SINGLESTEP)) { - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); regs->regs[regno] = saved_reg; } else { regs->regs[regno] = saved_reg; @@ -1832,7 +1831,7 @@ static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) * tracer modifications to the registers may have rewound the * state machine. */ - tracehook_report_syscall_exit(regs, 1); + ptrace_report_syscall_exit(regs, 1); } } diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 1a5f54e0d272..0f7e7b653c72 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include @@ -321,7 +320,7 @@ long arch_ptrace(struct task_struct *child, long request, asmlinkage int syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) return -1; if (secure_computing() == -1) @@ -339,7 +338,7 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs) audit_syscall_exit(regs); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_exit(regs, syscall_get_return_value(current, regs)); diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c index a11db009d0ea..a9898b27b756 100644 --- a/arch/h8300/kernel/ptrace.c +++ b/arch/h8300/kernel/ptrace.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include @@ -174,7 +173,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. 
* We'll return a bogus call number to get an ENOSYS @@ -196,5 +195,5 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 1240f038cce0..6447763ce5a9 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -348,7 +348,7 @@ void do_trap0(struct pt_regs *regs) /* allow strace to catch syscall args */ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs))) + ptrace_report_syscall_entry(regs))) return; /* return -ENOSYS somewhere? */ /* Interrupts should be re-enabled for syscall processing */ @@ -386,7 +386,7 @@ void do_trap0(struct pt_regs *regs) /* allow strace to get the syscall return state */ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE))) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); break; case TRAP_DEBUG: diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 6a1439eaa050..6af64aae087d 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -1217,7 +1217,7 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, struct pt_regs regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - if (tracehook_report_syscall_entry(®s)) + if (ptrace_report_syscall_entry(®s)) return -ENOSYS; /* copy user rbs to kernel rbs */ @@ -1243,7 +1243,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(®s, step); + ptrace_report_syscall_exit(®s, step); /* copy user rbs to kernel rbs */ if (test_thread_flag(TIF_RESTORE_RSE)) diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index aa3a0b8d07e9..a0c99fe3118e 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include @@ -282,13 +282,13 @@ asmlinkage int syscall_trace_enter(void) int ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE)) - ret = tracehook_report_syscall_entry(task_pt_regs(current)); + ret = ptrace_report_syscall_entry(task_pt_regs(current)); return ret; } asmlinkage void syscall_trace_leave(void) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(task_pt_regs(current), 0); + ptrace_report_syscall_exit(task_pt_regs(current), 0); } #endif /* CONFIG_COLDFIRE */ diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index badd286882ae..5234d0c1dcaa 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include @@ -140,7 +139,7 @@ asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs) secure_computing_strict(regs->r12); if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. 
* We'll return a bogus call number to get an ENOSYS @@ -161,7 +160,7 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } void ptrace_disable(struct task_struct *child) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index db7c5be1d4a3..567aec4abac0 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -1317,7 +1316,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) current_thread_info()->syscall = syscall; if (test_thread_flag(TIF_SYSCALL_TRACE)) { - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) return -1; syscall = current_thread_info()->syscall; } @@ -1376,7 +1375,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) trace_sys_exit(regs, regs_return_value(regs)); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); user_enter(); } diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h index 90aa56c94af1..04d55ce18d50 100644 --- a/arch/nds32/include/asm/syscall.h +++ b/arch/nds32/include/asm/syscall.h @@ -39,7 +39,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs) * * It's only valid to call this when @task is stopped for system * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), - * after tracehook_report_syscall_entry() returned nonzero to prevent + * after ptrace_report_syscall_entry() returned nonzero to prevent * the system call from taking place. 
* * This rolls back the register state in @regs so it's as if the diff --git a/arch/nds32/kernel/ptrace.c b/arch/nds32/kernel/ptrace.c index d0eda870fbc2..6a6988cf689d 100644 --- a/arch/nds32/kernel/ptrace.c +++ b/arch/nds32/kernel/ptrace.c @@ -3,7 +3,6 @@ #include #include -#include #include #include @@ -103,7 +102,7 @@ void user_disable_single_step(struct task_struct *child) asmlinkage int syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) { - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) forget_syscall(regs); } return regs->syscallno; @@ -113,6 +112,6 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) { int step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/nios2/kernel/ptrace.c b/arch/nios2/kernel/ptrace.c index a6ea9e1b4f61..cd62f310778b 100644 --- a/arch/nios2/kernel/ptrace.c +++ b/arch/nios2/kernel/ptrace.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -134,7 +133,7 @@ asmlinkage int do_syscall_trace_enter(void) int ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE)) - ret = tracehook_report_syscall_entry(task_pt_regs(current)); + ret = ptrace_report_syscall_entry(task_pt_regs(current)); return ret; } @@ -142,5 +141,5 @@ asmlinkage int do_syscall_trace_enter(void) asmlinkage void do_syscall_trace_exit(void) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(task_pt_regs(current), 0); + ptrace_report_syscall_exit(task_pt_regs(current), 0); } diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c index 4d60ae2a12fa..b971740fc2aa 100644 --- a/arch/openrisc/kernel/ptrace.c +++ b/arch/openrisc/kernel/ptrace.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -159,7 +158,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. * We'll return a bogus call number to get an ENOSYS @@ -181,5 +180,5 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 65de6c4c9354..96ef6a6b66e5 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -316,7 +315,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) { - int rc = tracehook_report_syscall_entry(regs); + int rc = ptrace_report_syscall_entry(regs); /* * As tracesys_next does not set %r28 to -ENOSYS @@ -327,7 +326,7 @@ long do_syscall_trace_enter(struct pt_regs *regs) if (rc) { /* * A nonzero return code from - * tracehook_report_syscall_entry() tells us + * ptrace_report_syscall_entry() tells us * to prevent the syscall execution. Skip * the syscall call and the syscall restart handling. 
* @@ -381,7 +380,7 @@ void do_syscall_trace_exit(struct pt_regs *regs) #endif if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, stepping); + ptrace_report_syscall_exit(regs, stepping); } diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index c43f77e2ac31..f394b0d6473f 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -16,7 +16,7 @@ */ #include -#include +#include #include #include #include @@ -263,12 +263,12 @@ long do_syscall_trace_enter(struct pt_regs *regs) flags = read_thread_flags() & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE); if (flags) { - int rc = tracehook_report_syscall_entry(regs); + int rc = ptrace_report_syscall_entry(regs); if (unlikely(flags & _TIF_SYSCALL_EMU)) { /* * A nonzero return code from - * tracehook_report_syscall_entry() tells us to prevent + * ptrace_report_syscall_entry() tells us to prevent * the syscall execution, but we are not going to * execute it anyway. * @@ -334,7 +334,7 @@ void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } void __init pt_regs_check(void); diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index a89243730153..793c7da0554b 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -17,7 +17,6 @@ #include #include #include -#include #define CREATE_TRACE_POINTS #include @@ -241,7 +240,7 @@ long arch_ptrace(struct task_struct *child, long request, __visible int do_syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) return -1; /* @@ -266,7 +265,7 @@ __visible void do_syscall_trace_exit(struct pt_regs *regs) audit_syscall_exit(regs); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 5281685f6ad1..d417988d9770 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -456,7 +455,7 @@ long arch_ptrace(struct task_struct *child, long request, asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + ptrace_report_syscall_entry(regs)) { regs->regs[0] = -ENOSYS; return -1; } @@ -484,5 +483,5 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c index 5318174a0268..e7db48acb838 100644 --- a/arch/sparc/kernel/ptrace_32.c +++ b/arch/sparc/kernel/ptrace_32.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -439,9 +438,9 @@ asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p) if (test_thread_flag(TIF_SYSCALL_TRACE)) { if (syscall_exit_p) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); else - ret = tracehook_report_syscall_entry(regs); + ret = 
ptrace_report_syscall_entry(regs); } return ret; diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 2b92155db8a5..86a7eb5c27ba 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -1095,7 +1094,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) user_exit(); if (test_thread_flag(TIF_SYSCALL_TRACE)) - ret = tracehook_report_syscall_entry(regs); + ret = ptrace_report_syscall_entry(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->u_regs[UREG_G1]); @@ -1118,7 +1117,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) trace_sys_exit(regs, regs->u_regs[UREG_I0]); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); if (test_thread_flag(TIF_NOHZ)) user_enter(); diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index b425f47bddbb..bfaf6ab1ac03 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include @@ -135,7 +134,7 @@ int syscall_trace_enter(struct pt_regs *regs) if (!test_thread_flag(TIF_SYSCALL_TRACE)) return 0; - return tracehook_report_syscall_entry(regs); + return ptrace_report_syscall_entry(regs); } void syscall_trace_leave(struct pt_regs *regs) @@ -151,7 +150,7 @@ void syscall_trace_leave(struct pt_regs *regs) if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); /* force do_signal() --> is_syscall() */ if (ptraced & PT_PTRACED) set_thread_flag(TIF_SIGPENDING); diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index bb3f4797d212..323c678a691f 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #define CREATE_TRACE_POINTS @@ -550,7 +549,7 @@ int do_syscall_trace_enter(struct pt_regs *regs) regs->areg[2] = -ENOSYS; if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + ptrace_report_syscall_entry(regs)) { regs->areg[2] = -ENOSYS; regs->syscall = NO_SYSCALL; return 0; @@ -583,5 +582,5 @@ void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 81695eb02a12..5a80fe728dc8 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -44,7 +44,7 @@ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); * * It's only valid to call this when @task is stopped for system * call exit tracing (due to %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_AUDIT), after tracehook_report_syscall_entry() + * %SYSCALL_WORK_SYSCALL_AUDIT), after ptrace_report_syscall_entry() * returned nonzero to prevent the system call from taking place. 
* * This rolls back the register state in @regs so it's as if the diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 2e2b8d6140ed..a670e9fba7a9 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -3,7 +3,7 @@ #define __LINUX_ENTRYCOMMON_H #include -#include +#include #include #include #include @@ -95,7 +95,7 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs #ifndef arch_syscall_enter_tracehook static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs) { - return tracehook_report_syscall_entry(regs); + return ptrace_report_syscall_entry(regs); } #endif @@ -294,7 +294,7 @@ static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step); #ifndef arch_syscall_exit_tracehook static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step) { - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } #endif diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 91b1074edb4c..5310f43e4762 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -440,4 +440,55 @@ static inline int ptrace_report_syscall(unsigned long message) current->ptrace_message = 0; return fatal_signal_pending(current); } + +/** + * ptrace_report_syscall_entry - task is about to attempt a system call + * @regs: user register state of current task + * + * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or + * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just + * entered the kernel for a system call. Full user register state is + * available here. Changing the values in @regs can affect the system + * call number and arguments to be tried. It is safe to block here, + * preventing the system call from beginning. + * + * Returns zero normally, or nonzero if the calling arch code should abort + * the system call. That must prevent normal entry so no system call is + * made. If @task ever returns to user mode after this, its register state + * is unspecified, but should be something harmless like an %ENOSYS error + * return. It should preserve enough information so that syscall_rollback() + * can work (see asm-generic/syscall.h). + * + * Called without locks, just after entering kernel mode. + */ +static inline __must_check int ptrace_report_syscall_entry( + struct pt_regs *regs) +{ + return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY); +} + +/** + * ptrace_report_syscall_exit - task has just finished a system call + * @regs: user register state of current task + * @step: nonzero if simulating single-step or block-step + * + * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when + * the current task has just finished an attempted system call. Full + * user register state is available here. It is safe to block here, + * preventing signals from being processed. + * + * If @step is nonzero, this report is also in lieu of the normal + * trap that would follow the system call instruction because + * user_enable_block_step() or user_enable_single_step() was used. + * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set. + * + * Called without locks, just before checking for pending signals. 
+ */ +static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step) +{ + if (step) + user_single_step_report(regs); + else + ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT); +} #endif diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 998bc3863559..819e82ac09bd 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -52,57 +52,6 @@ struct linux_binprm; -/** - * tracehook_report_syscall_entry - task is about to attempt a system call - * @regs: user register state of current task - * - * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just - * entered the kernel for a system call. Full user register state is - * available here. Changing the values in @regs can affect the system - * call number and arguments to be tried. It is safe to block here, - * preventing the system call from beginning. - * - * Returns zero normally, or nonzero if the calling arch code should abort - * the system call. That must prevent normal entry so no system call is - * made. If @task ever returns to user mode after this, its register state - * is unspecified, but should be something harmless like an %ENOSYS error - * return. It should preserve enough information so that syscall_rollback() - * can work (see asm-generic/syscall.h). - * - * Called without locks, just after entering kernel mode. - */ -static inline __must_check int tracehook_report_syscall_entry( - struct pt_regs *regs) -{ - return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY); -} - -/** - * tracehook_report_syscall_exit - task has just finished a system call - * @regs: user register state of current task - * @step: nonzero if simulating single-step or block-step - * - * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when - * the current task has just finished an attempted system call. Full - * user register state is available here. It is safe to block here, - * preventing signals from being processed. - * - * If @step is nonzero, this report is also in lieu of the normal - * trap that would follow the system call instruction because - * user_enable_block_step() or user_enable_single_step() was used. - * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set. - * - * Called without locks, just before checking for pending signals. - */ -static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) -{ - if (step) - user_single_step_report(regs); - else - ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT); -} - /** * tracehook_signal_handler - signal handler setup is complete * @stepping: nonzero if debugger single-step or block-step in use diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index 3747bf816f9a..b7af92e07d1f 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h @@ -114,7 +114,7 @@ struct ptrace_rseq_configuration { /* * These values are stored in task->ptrace_message - * by tracehook_report_syscall_* to describe the current syscall-stop. + * by ptrace_report_syscall_* to describe the current syscall-stop. */ #define PTRACE_EVENTMSG_SYSCALL_ENTRY 1 #define PTRACE_EVENTMSG_SYSCALL_EXIT 2 diff --git a/kernel/entry/common.c b/kernel/entry/common.c index bad713684c2e..f52e57c4d6d8 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -2,6 +2,7 @@ #include #include +#include #include #include #include -- cgit v1.2.3-71-gd317 From 0cfcb2b9ef48bbcaf5d43b9f1893f63a938e8176 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Thu, 27 Jan 2022 12:00:55 -0600 Subject: ptrace: Remove arch_syscall_{enter,exit}_tracehook These functions are alwasy one-to-one wrappers around ptrace_report_syscall_entry and ptrace_report_syscall_exit. So directly call the functions they are wrapping instead. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-4-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- include/linux/entry-common.h | 43 ++----------------------------------------- kernel/entry/common.c | 4 ++-- 2 files changed, 4 insertions(+), 43 deletions(-) (limited to 'include/linux') diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index a670e9fba7a9..9efbdda61f7a 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -79,26 +79,6 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs); static __always_inline void arch_check_user_regs(struct pt_regs *regs) {} #endif -/** - * arch_syscall_enter_tracehook - Wrapper around tracehook_report_syscall_entry() - * @regs: Pointer to currents pt_regs - * - * Returns: 0 on success or an error code to skip the syscall. - * - * Defaults to tracehook_report_syscall_entry(). Can be replaced by - * architecture specific code. - * - * Invoked from syscall_enter_from_user_mode() - */ -static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs); - -#ifndef arch_syscall_enter_tracehook -static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs) -{ - return ptrace_report_syscall_entry(regs); -} -#endif - /** * enter_from_user_mode - Establish state when coming from user mode * @@ -157,7 +137,7 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs); * It handles the following work items: * * 1) syscall_work flag dependent invocations of - * arch_syscall_enter_tracehook(), __secure_computing(), trace_sys_enter() + * ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter() * 2) Invocation of audit_syscall_entry() */ long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall); @@ -279,25 +259,6 @@ static __always_inline void arch_exit_to_user_mode(void) { } */ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal); -/** - * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit() - * @regs: Pointer to currents pt_regs - * @step: Indicator for single step - * - * Defaults to tracehook_report_syscall_exit(). Can be replaced by - * architecture specific code. - * - * Invoked from syscall_exit_to_user_mode() - */ -static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step); - -#ifndef arch_syscall_exit_tracehook -static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step) -{ - ptrace_report_syscall_exit(regs, step); -} -#endif - /** * exit_to_user_mode - Fixup state when exiting to user mode * @@ -347,7 +308,7 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs); * - rseq syscall exit * - audit * - syscall tracing - * - tracehook (single stepping) + * - ptrace (single stepping) * * 2) Preparatory work * - Exit to user mode loop (common TIF handling). 
Invokes diff --git a/kernel/entry/common.c b/kernel/entry/common.c index f52e57c4d6d8..f0b1daa1e8da 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -59,7 +59,7 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall, /* Handle ptrace */ if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) { - ret = arch_syscall_enter_tracehook(regs); + ret = ptrace_report_syscall_entry(regs); if (ret || (work & SYSCALL_WORK_SYSCALL_EMU)) return -1L; } @@ -253,7 +253,7 @@ static void syscall_exit_work(struct pt_regs *regs, unsigned long work) step = report_single_step(work); if (step || work & SYSCALL_WORK_SYSCALL_TRACE) - arch_syscall_exit_tracehook(regs, step); + ptrace_report_syscall_exit(regs, step); } /* -- cgit v1.2.3-71-gd317 From c145137dc990fd67b52fbc52faae5ba46f168cca Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 27 Jan 2022 12:04:27 -0600 Subject: ptrace: Remove tracehook_signal_handler The two-line function tracehook_signal_handler is only called from signal_delivered. Expand it inline in signal_delivered and remove it, just to make it easier to understand what is going on. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-5-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- arch/Kconfig | 1 - include/linux/tracehook.h | 17 ----------------- kernel/signal.c | 3 ++- 3 files changed, 2 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index a517a949eb1d..6382520ef0a5 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -219,7 +219,6 @@ config TRACE_IRQFLAGS_SUPPORT # CORE_DUMP_USE_REGSET #define'd in linux/elf.h # TIF_SYSCALL_TRACE calls ptrace_report_syscall_{entry,exit} # TIF_NOTIFY_RESUME calls tracehook_notify_resume() -# signal delivery calls tracehook_signal_handler() # config HAVE_ARCH_TRACEHOOK bool diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 819e82ac09bd..b77bf4917196 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -52,23 +52,6 @@ struct linux_binprm; -/** - * tracehook_signal_handler - signal handler setup is complete - * @stepping: nonzero if debugger single-step or block-step in use - * - * Called by the arch code after a signal handler has been set up. - * Register and stack state reflects the user handler about to run. - * Signal mask changes have already been made. - * - * Called without locks, shortly before returning to user mode - * (or handling more signals). - */ -static inline void tracehook_signal_handler(int stepping) -{ - if (stepping) - ptrace_notify(SIGTRAP); -} - /** * set_notify_resume - cause tracehook_notify_resume() to be called * @task: task that will call tracehook_notify_resume() diff --git a/kernel/signal.c b/kernel/signal.c index 38602738866e..0e0bd1c1068b 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2898,7 +2898,8 @@ static void signal_delivered(struct ksignal *ksig, int stepping) set_current_blocked(&blocked); if (current->sas_ss_flags & SS_AUTODISARM) sas_ss_reset(current); - tracehook_signal_handler(stepping); + if (stepping) + ptrace_notify(SIGTRAP); } void signal_setup_done(int failed, struct ksignal *ksig, int stepping) -- cgit v1.2.3-71-gd317 From 8ca07e17c9dd4c4afcb4a3f2ea8f0a0d41c0f982 Mon Sep 17 00:00:00 2001 From: "Eric W.
Biederman" Date: Fri, 28 Jan 2022 13:55:47 -0600 Subject: task_work: Remove unnecessary include from posix_timers.h Break a header file circular dependency by removing the unnecessary include of task_work.h from posix_timers.h. sched.h -> posix-timers.h posix-timers.h -> task_work.h task_work.h -> sched.h Add missing includes of task_work.h to: arch/x86/mm/tlb.c kernel/time/posix-cpu-timers.c Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-6-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- arch/x86/mm/tlb.c | 1 + include/linux/posix-timers.h | 1 - kernel/time/posix-cpu-timers.c | 1 + 3 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index a6cf56a14939..6eb4d91d5365 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 5bbcd280bfd2..83539bb2f023 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -6,7 +6,6 @@ #include #include #include -#include struct kernel_siginfo; struct task_struct; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 96b4e7810426..9190d9eb236d 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "posix-timers.h" -- cgit v1.2.3-71-gd317 From 7f62d40d9cb50fd146fe8ff071f98fa3c1855083 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 9 Feb 2022 08:52:41 -0600 Subject: task_work: Introduce task_work_pending Wrap the test of task->task_works in a helper function to make it clear what is being tested. All of the other readers of task->task_work use READ_ONCE and this is even necessary on current as other processes can update task->task_work. So for consistency I have added READ_ONCE into task_work_pending. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-7-ebiederm@xmission.com Signed-off-by: "Eric W. 
Biederman" --- fs/io_uring.c | 6 +++--- include/linux/task_work.h | 5 +++++ include/linux/tracehook.h | 4 ++-- kernel/signal.c | 4 ++-- kernel/task_work.c | 2 +- 5 files changed, 13 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index e54c4127422e..e85261079a78 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2590,7 +2590,7 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) static inline bool io_run_task_work(void) { - if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) { + if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) { __set_current_state(TASK_RUNNING); tracehook_notify_signal(); return true; @@ -7602,7 +7602,7 @@ static int io_sq_thread(void *data) } prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE); - if (!io_sqd_events_pending(sqd) && !current->task_works) { + if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) { bool needs_sched = true; list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { @@ -10321,7 +10321,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, hlist_for_each_entry(req, list, hash_node) seq_printf(m, " op=%d, task_works=%d\n", req->opcode, - req->task->task_works != NULL); + task_work_pending(req->task)); } seq_puts(m, "CqOverflowList:\n"); diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 5b8a93f288bb..897494b597ba 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -19,6 +19,11 @@ enum task_work_notify_mode { TWA_SIGNAL, }; +static inline bool task_work_pending(struct task_struct *task) +{ + return READ_ONCE(task->task_works); +} + int task_work_add(struct task_struct *task, struct callback_head *twork, enum task_work_notify_mode mode); diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index b77bf4917196..fa834a22e86e 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -90,7 +90,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) * hlist_add_head(task->task_works); */ smp_mb__after_atomic(); - if (unlikely(current->task_works)) + if (unlikely(task_work_pending(current))) task_work_run(); #ifdef CONFIG_KEYS_REQUEST_CACHE @@ -115,7 +115,7 @@ static inline void tracehook_notify_signal(void) { clear_thread_flag(TIF_NOTIFY_SIGNAL); smp_mb__after_atomic(); - if (current->task_works) + if (task_work_pending(current)) task_work_run(); } diff --git a/kernel/signal.c b/kernel/signal.c index 0e0bd1c1068b..3b4cf25fb9b3 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2344,7 +2344,7 @@ static void ptrace_do_notify(int signr, int exit_code, int why) void ptrace_notify(int exit_code) { BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); - if (unlikely(current->task_works)) + if (unlikely(task_work_pending(current))) task_work_run(); spin_lock_irq(¤t->sighand->siglock); @@ -2626,7 +2626,7 @@ bool get_signal(struct ksignal *ksig) struct signal_struct *signal = current->signal; int signr; - if (unlikely(current->task_works)) + if (unlikely(task_work_pending(current))) task_work_run(); /* diff --git a/kernel/task_work.c b/kernel/task_work.c index 1698fbe6f0e1..cc6fccb0e24d 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -78,7 +78,7 @@ task_work_cancel_match(struct task_struct *task, struct callback_head *work; unsigned long flags; - if (likely(!task->task_works)) + if (likely(!task_work_pending(task))) return NULL; /* * If cmpxchg() fails we continue without updating pprev. 
-- cgit v1.2.3-71-gd317 From 3b5d4ddf8fe1f60082513f94bae586ac80188a03 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 9 Mar 2022 01:04:50 -0800 Subject: bpf: net: Remove TC_AT_INGRESS_OFFSET and SKB_MONO_DELIVERY_TIME_OFFSET macro This patch removes the TC_AT_INGRESS_OFFSET and SKB_MONO_DELIVERY_TIME_OFFSET macros. Instead, PKT_VLAN_PRESENT_OFFSET is used because all of them are at the same offset. A comment is added to make it clear that changing the position of tc_at_ingress or mono_delivery_time will require adjusting the defined macros. The earlier discussion can be found here: https://lore.kernel.org/bpf/419d994e-ff61-7c11-0ec7-11fefcb0186e@iogearbox.net/ Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220309090450.3710955-1-kafai@fb.com --- include/linux/skbuff.h | 10 +++++----- net/core/filter.c | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2be263184d1e..7b0cb10e70cb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -960,10 +960,10 @@ struct sk_buff { __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 dst_pending_confirm:1; - __u8 mono_delivery_time:1; + __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */ #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; - __u8 tc_at_ingress:1; + __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ #endif #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; @@ -1062,7 +1062,9 @@ struct sk_buff { #endif #define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset) -/* if you move pkt_vlan_present around you also must adapt these constants */ +/* if you move pkt_vlan_present, tc_at_ingress, or mono_delivery_time + * around, you also must adapt these constants. + */ #ifdef __BIG_ENDIAN_BITFIELD #define PKT_VLAN_PRESENT_BIT 7 #define TC_AT_INGRESS_MASK (1 << 0) @@ -1073,8 +1075,6 @@ struct sk_buff { #define SKB_MONO_DELIVERY_TIME_MASK (1 << 5) #endif #define PKT_VLAN_PRESENT_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) -#define TC_AT_INGRESS_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) -#define SKB_MONO_DELIVERY_TIME_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) #ifdef __KERNEL__ /* diff --git a/net/core/filter.c b/net/core/filter.c index 88767f7da150..738a294a3c82 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8896,7 +8896,7 @@ static struct bpf_insn *bpf_convert_dtime_type_read(const struct bpf_insn *si, __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, SKB_MONO_DELIVERY_TIME_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); @@ -8912,7 +8912,7 @@ static struct bpf_insn *bpf_convert_dtime_type_read(const struct bpf_insn *si, *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ?
6 : 1); #ifdef CONFIG_NET_CLS_ACT - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); /* At ingress, value_reg = 0 */ @@ -8959,14 +8959,14 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, if (!prog->delivery_time_access) { __u8 tmp_reg = BPF_REG_AX; - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 5); /* @ingress, read __sk_buff->tstamp as the (rcv) timestamp, * so check the skb->mono_delivery_time. */ *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, SKB_MONO_DELIVERY_TIME_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); @@ -8992,18 +8992,18 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, if (!prog->delivery_time_access) { __u8 tmp_reg = BPF_REG_AX; - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 3); /* Writing __sk_buff->tstamp at ingress as the (rcv) timestamp. * Clear the skb->mono_delivery_time. */ *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK); *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + PKT_VLAN_PRESENT_OFFSET); } #endif -- cgit v1.2.3-71-gd317 From 9bb984f28d5bcb917d35d930fcfb89f90f9449fd Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 9 Mar 2022 01:05:09 -0800 Subject: bpf: Remove BPF_SKB_DELIVERY_TIME_NONE and rename s/delivery_time_/tstamp_/ This patch simplifies the uapi bpf.h regarding the tstamp type and uses a way similar to the kernel's to describe the value stored in __sk_buff->tstamp. My earlier thought was to avoid describing the semantic and clock base for the rcv timestamp until there is more clarity on the use case, hence the __sk_buff->delivery_time_type naming instead of __sk_buff->tstamp_type. On further thought, it can reuse the UNSPEC naming. This patch first removes BPF_SKB_DELIVERY_TIME_NONE and also renames BPF_SKB_DELIVERY_TIME_UNSPEC to BPF_SKB_TSTAMP_UNSPEC and BPF_SKB_DELIVERY_TIME_MONO to BPF_SKB_TSTAMP_DELIVERY_MONO. The semantics of BPF_SKB_TSTAMP_DELIVERY_MONO are the same: __sk_buff->tstamp has delivery time in mono clock base. BPF_SKB_TSTAMP_UNSPEC means __sk_buff->tstamp has the (rcv) tstamp at ingress and the delivery time at egress. At egress, the clock base could be found from skb->sk->sk_clockid. __sk_buff->tstamp == 0 naturally means NONE, so NONE is not needed. With BPF_SKB_TSTAMP_UNSPEC for the rcv tstamp at ingress, the __sk_buff->delivery_time_type is also renamed to __sk_buff->tstamp_type which was also suggested in the earlier discussion: https://lore.kernel.org/bpf/b181acbe-caf8-502d-4b7b-7d96b9fc5d55@iogearbox.net/ The above will then make __sk_buff->tstamp and __sk_buff->tstamp_type the same as its kernel skb->tstamp and skb->mono_delivery_time counterpart.
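As a rough illustration of the renamed uapi (an editor's sketch, not part of the patch): a tc BPF program could tag a mono delivery time as below. It assumes libbpf's bpf_helpers.h and uapi headers that already contain this patch; the program name stamp_mono is invented for the example.

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int stamp_mono(struct __sk_buff *skb)
{
	/* Reading tstamp_type sets prog->tstamp_type_access, so the
	 * kernel no longer rewrites skb->tstamp accesses for this prog.
	 */
	if (skb->tstamp_type == BPF_SKB_TSTAMP_UNSPEC)
		/* bpf_ktime_get_ns() is mono clock base, matching the
		 * type; a zero tstamp with DELIVERY_MONO gets -EINVAL.
		 */
		bpf_skb_set_tstamp(skb, bpf_ktime_get_ns(),
				   BPF_SKB_TSTAMP_DELIVERY_MONO);
	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";

Since the helper only supports IPv4 and IPv6 skb->protocol and returns -EOPNOTSUPP otherwise, a real program would also check its return value.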
The internal kernel function bpf_skb_convert_dtime_type_read() is then renamed to bpf_skb_convert_tstamp_type_read() and it can be simplified with the BPF_SKB_DELIVERY_TIME_NONE gone. A BPF_ALU32_IMM(BPF_AND) insn is also saved by using BPF_JMP32_IMM(BPF_JSET). The bpf helper bpf_skb_set_delivery_time() is also renamed to bpf_skb_set_tstamp(). The arg name is likewise changed from dtime to tstamp. It only allows setting tstamp 0 for BPF_SKB_TSTAMP_UNSPEC; this could be relaxed later if there is a use case for changing mono delivery time to non-mono. prog->delivery_time_access is also renamed to prog->tstamp_type_access. Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220309090509.3712315-1-kafai@fb.com --- include/linux/filter.h | 2 +- include/uapi/linux/bpf.h | 40 ++++++++++--------- net/core/filter.c | 88 ++++++++++++++++-------------------------- tools/include/uapi/linux/bpf.h | 40 ++++++++++--------- 4 files changed, 77 insertions(+), 93 deletions(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 9bf26307247f..05ed9bd31b45 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -573,7 +573,7 @@ struct bpf_prog { enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ call_get_func_ip:1, /* Do we call get_func_ip() */ - delivery_time_access:1; /* Accessed __sk_buff->delivery_time_type */ + tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index bc23020b638d..d288a0a9f797 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5090,23 +5090,22 @@ union bpf_attr { * 0 on success, or a negative error in case of failure. On error * *dst* buffer is zeroed out. * - * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type) + * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type) * Description - * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also - * change the __sk_buff->delivery_time_type to *dtime_type*. + * Change the __sk_buff->tstamp_type to *tstamp_type* + * and set *tstamp* to the __sk_buff->tstamp together. * - * When setting a delivery time (non zero *dtime*) to - * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* - * is supported. It is the only delivery_time_type that will be - * kept after bpf_redirect_*(). - * - * If there is no need to change the __sk_buff->delivery_time_type, - * the delivery time can be directly written to __sk_buff->tstamp + * If there is no need to change the __sk_buff->tstamp_type, + * the tstamp value can be directly written to __sk_buff->tstamp * instead. * - * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE - * can be used to clear any delivery time stored in - * __sk_buff->tstamp. + * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that + * will be kept during bpf_redirect_*(). A non zero + * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO + * *tstamp_type*. + * + * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used + * with a zero *tstamp*. * * Only IPv4 and IPv6 skb->protocol are supported. * @@ -5119,7 +5118,7 @@ union bpf_attr { * Return * 0 on success.
* **-EINVAL** for invalid input - * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol + * **-EOPNOTSUPP** for unsupported protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,7 +5313,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ - FN(skb_set_delivery_time), \ + FN(skb_set_tstamp), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5505,9 +5504,12 @@ union { \ } __attribute__((aligned(8))) enum { - BPF_SKB_DELIVERY_TIME_NONE, - BPF_SKB_DELIVERY_TIME_UNSPEC, - BPF_SKB_DELIVERY_TIME_MONO, + BPF_SKB_TSTAMP_UNSPEC, + BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ + /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, + * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC + * and try to deduce it by ingress, egress or skb->sk->sk_clockid. + */ }; /* user accessible mirror of in-kernel sk_buff. @@ -5550,7 +5552,7 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u8 delivery_time_type; + __u8 tstamp_type; __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; diff --git a/net/core/filter.c b/net/core/filter.c index f914e4b13b18..03655f2074ae 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7388,36 +7388,36 @@ static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = { .arg3_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_skb_set_delivery_time, struct sk_buff *, skb, - u64, dtime, u32, dtime_type) +BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb, + u64, tstamp, u32, tstamp_type) { /* skb_clear_delivery_time() is done for inet protocol */ if (skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_IPV6)) return -EOPNOTSUPP; - switch (dtime_type) { - case BPF_SKB_DELIVERY_TIME_MONO: - if (!dtime) + switch (tstamp_type) { + case BPF_SKB_TSTAMP_DELIVERY_MONO: + if (!tstamp) return -EINVAL; - skb->tstamp = dtime; + skb->tstamp = tstamp; skb->mono_delivery_time = 1; break; - case BPF_SKB_DELIVERY_TIME_NONE: - if (dtime) + case BPF_SKB_TSTAMP_UNSPEC: + if (tstamp) return -EINVAL; skb->tstamp = 0; skb->mono_delivery_time = 0; break; default: - return -EOPNOTSUPP; + return -EINVAL; } return 0; } -static const struct bpf_func_proto bpf_skb_set_delivery_time_proto = { - .func = bpf_skb_set_delivery_time, +static const struct bpf_func_proto bpf_skb_set_tstamp_proto = { + .func = bpf_skb_set_tstamp, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, @@ -7786,8 +7786,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_tcp_gen_syncookie_proto; case BPF_FUNC_sk_assign: return &bpf_sk_assign_proto; - case BPF_FUNC_skb_set_delivery_time: - return &bpf_skb_set_delivery_time_proto; + case BPF_FUNC_skb_set_tstamp: + return &bpf_skb_set_tstamp_proto; #endif default: return bpf_sk_base_func_proto(func_id); @@ -8127,9 +8127,9 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; break; - case offsetof(struct __sk_buff, delivery_time_type): + case offsetof(struct __sk_buff, tstamp_type): return false; - case offsetofend(struct __sk_buff, delivery_time_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: + case offsetofend(struct __sk_buff, tstamp_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: /* Explicitly prohibit access to padding in __sk_buff. 
*/ return false; default: @@ -8484,14 +8484,14 @@ static bool tc_cls_act_is_valid_access(int off, int size, break; case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; - case offsetof(struct __sk_buff, delivery_time_type): + case offsetof(struct __sk_buff, tstamp_type): /* The convert_ctx_access() on reading and writing * __sk_buff->tstamp depends on whether the bpf prog - * has used __sk_buff->delivery_time_type or not. - * Thus, we need to set prog->delivery_time_access + * has used __sk_buff->tstamp_type or not. + * Thus, we need to set prog->tstamp_type_access * earlier during is_valid_access() here. */ - ((struct bpf_prog *)prog)->delivery_time_access = 1; + ((struct bpf_prog *)prog)->tstamp_type_access = 1; return size == sizeof(__u8); } @@ -8888,42 +8888,22 @@ static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, return insn - insn_buf; } -static struct bpf_insn *bpf_convert_dtime_type_read(const struct bpf_insn *si, - struct bpf_insn *insn) +static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si, + struct bpf_insn *insn) { __u8 value_reg = si->dst_reg; __u8 skb_reg = si->src_reg; + /* AX is needed because src_reg and dst_reg could be the same */ __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); - /* value_reg = BPF_SKB_DELIVERY_TIME_MONO */ - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_MONO); - *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 10 : 5); - - *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, skb_reg, - offsetof(struct sk_buff, tstamp)); - *insn++ = BPF_JMP_IMM(BPF_JNE, tmp_reg, 0, 2); - /* value_reg = BPF_SKB_DELIVERY_TIME_NONE */ - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_NONE); - *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 6 : 1); - -#ifdef CONFIG_NET_CLS_ACT - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); - /* At ingress, value_reg = 0 */ - *insn++ = BPF_MOV32_IMM(value_reg, 0); + *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, + SKB_MONO_DELIVERY_TIME_MASK, 2); + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC); *insn++ = BPF_JMP_A(1); -#endif - - /* value_reg = BPF_SKB_DELIVERYT_TIME_UNSPEC */ - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_UNSPEC); + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_DELIVERY_MONO); - /* 15 insns with CONFIG_NET_CLS_ACT */ return insn; } @@ -8956,11 +8936,11 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, __u8 skb_reg = si->src_reg; #ifdef CONFIG_NET_CLS_ACT - /* If the delivery_time_type is read, + /* If the tstamp_type is read, * the bpf prog is aware the tstamp could have delivery time. - * Thus, read skb->tstamp as is if delivery_time_access is true. + * Thus, read skb->tstamp as is if tstamp_type_access is true. */ - if (!prog->delivery_time_access) { + if (!prog->tstamp_type_access) { /* AX is needed because src_reg and dst_reg could be the same */ __u8 tmp_reg = BPF_REG_AX; @@ -8990,13 +8970,13 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, __u8 skb_reg = si->dst_reg; #ifdef CONFIG_NET_CLS_ACT - /* If the delivery_time_type is read, + /* If the tstamp_type is read, * the bpf prog is aware the tstamp could have delivery time. 
- * Thus, write skb->tstamp as is if delivery_time_access is true. + * Thus, write skb->tstamp as is if tstamp_type_access is true. * Otherwise, writing at ingress will have to clear the * mono_delivery_time bit also. */ - if (!prog->delivery_time_access) { + if (!prog->tstamp_type_access) { __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); @@ -9329,8 +9309,8 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, insn = bpf_convert_tstamp_read(prog, si, insn); break; - case offsetof(struct __sk_buff, delivery_time_type): - insn = bpf_convert_dtime_type_read(si, insn); + case offsetof(struct __sk_buff, tstamp_type): + insn = bpf_convert_tstamp_type_read(si, insn); break; case offsetof(struct __sk_buff, gso_segs): diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index bc23020b638d..d288a0a9f797 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5090,23 +5090,22 @@ union bpf_attr { * 0 on success, or a negative error in case of failure. On error * *dst* buffer is zeroed out. * - * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type) + * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type) * Description - * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also - * change the __sk_buff->delivery_time_type to *dtime_type*. + * Change the __sk_buff->tstamp_type to *tstamp_type* + * and set *tstamp* to the __sk_buff->tstamp together. * - * When setting a delivery time (non zero *dtime*) to - * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* - * is supported. It is the only delivery_time_type that will be - * kept after bpf_redirect_*(). - * - * If there is no need to change the __sk_buff->delivery_time_type, - * the delivery time can be directly written to __sk_buff->tstamp + * If there is no need to change the __sk_buff->tstamp_type, + * the tstamp value can be directly written to __sk_buff->tstamp * instead. * - * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE - * can be used to clear any delivery time stored in - * __sk_buff->tstamp. + * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that + * will be kept during bpf_redirect_*(). A non zero + * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO + * *tstamp_type*. + * + * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used + * with a zero *tstamp*. * * Only IPv4 and IPv6 skb->protocol are supported. * @@ -5119,7 +5118,7 @@ union bpf_attr { * Return * 0 on success. * **-EINVAL** for invalid input - * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol + * **-EOPNOTSUPP** for unsupported protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,7 +5313,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ - FN(skb_set_delivery_time), \ + FN(skb_set_tstamp), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5505,9 +5504,12 @@ union { \ } __attribute__((aligned(8))) enum { - BPF_SKB_DELIVERY_TIME_NONE, - BPF_SKB_DELIVERY_TIME_UNSPEC, - BPF_SKB_DELIVERY_TIME_MONO, + BPF_SKB_TSTAMP_UNSPEC, + BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ + /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, + * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC + * and try to deduce it by ingress, egress or skb->sk->sk_clockid. + */ }; /* user accessible mirror of in-kernel sk_buff. 
@@ -5550,7 +5552,7 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u8 delivery_time_type; + __u8 tstamp_type; __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; -- cgit v1.2.3-71-gd317 From 26183cfe478c1d1d5cd1e3920a4b2c5b1980849d Mon Sep 17 00:00:00 2001 From: Colin Foster Date: Tue, 8 Mar 2022 22:25:44 -0800 Subject: net: phy: correct spelling error of media in documentation The header file incorrectly referenced "median-independant interface" instead of media. Correct this typo. Signed-off-by: Colin Foster Fixes: 4069a572d423 ("net: phy: Document core PHY structures") Reviewed-by: Russell King (Oracle) Link: https://lore.kernel.org/r/20220309062544.3073-1-colin.foster@in-advantage.com Signed-off-by: Jakub Kicinski --- include/linux/phy.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 6de8d7a90d78..8fa70ba063a5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -87,8 +87,8 @@ extern const int phy_10gbit_features_array[1]; * * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined - * @PHY_INTERFACE_MODE_MII: Median-independent interface - * @PHY_INTERFACE_MODE_GMII: Gigabit median-independent interface + * @PHY_INTERFACE_MODE_MII: Media-independent interface + * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface -- cgit v1.2.3-71-gd317 From 8ba62d37949e248c698c26e0d82d72fda5d33ebf Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 9 Feb 2022 09:51:14 -0600 Subject: task_work: Call tracehook_notify_signal from get_signal on all architectures Always handle TIF_NOTIFY_SIGNAL in get_signal. With commit 35d0b389f3b2 ("task_work: unconditionally run task_work from get_signal()") always calling task_work_run, all of the work of tracehook_notify_signal is already happening except clearing TIF_NOTIFY_SIGNAL. Factor clear_notify_signal out of tracehook_notify_signal and use it in get_signal so that get_signal only needs one call of task_work_run. To keep the semantics in sync, update xfer_to_guest_mode_work (which does not call get_signal) to call tracehook_notify_signal if either _TIF_SIGPENDING or _TIF_NOTIFY_SIGNAL. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-8-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- arch/s390/kernel/signal.c | 4 ++-- arch/x86/kernel/signal.c | 4 ++-- include/linux/entry-common.h | 2 +- include/linux/tracehook.h | 9 +++++++-- kernel/entry/common.c | 12 ++---------- kernel/entry/kvm.c | 2 +- kernel/signal.c | 14 +++----------- 7 files changed, 18 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 307f5d99514d..ea9e5e8182cd 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -453,7 +453,7 @@ static void handle_signal(struct ksignal *ksig, sigset_t *oldset, * stack-frames in one go after that.
*/ -void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) +void arch_do_signal_or_restart(struct pt_regs *regs) { struct ksignal ksig; sigset_t *oldset = sigmask_to_save(); @@ -466,7 +466,7 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) current->thread.system_call = test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0; - if (has_signal && get_signal(&ksig)) { + if (get_signal(&ksig)) { /* Whee! Actually deliver the signal. */ if (current->thread.system_call) { regs->int_code = current->thread.system_call; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index ec71e06ae364..de3d5b5724d8 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -861,11 +861,11 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) +void arch_do_signal_or_restart(struct pt_regs *regs) { struct ksignal ksig; - if (has_signal && get_signal(&ksig)) { + if (get_signal(&ksig)) { /* Whee! Actually deliver the signal. */ handle_signal(&ksig, regs); return; diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 9efbdda61f7a..3537fd25f14e 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -257,7 +257,7 @@ static __always_inline void arch_exit_to_user_mode(void) { } * * Invoked from exit_to_user_mode_loop(). */ -void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal); +void arch_do_signal_or_restart(struct pt_regs *regs); /** * exit_to_user_mode - Fixup state when exiting to user mode diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index fa834a22e86e..b44a7820c468 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -106,6 +106,12 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) rseq_handle_notify_resume(NULL, regs); } +static inline void clear_notify_signal(void) +{ + clear_thread_flag(TIF_NOTIFY_SIGNAL); + smp_mb__after_atomic(); +} + /* * called by exit_to_user_mode_loop() if ti_work & _TIF_NOTIFY_SIGNAL. 
This * is currently used by TWA_SIGNAL based task_work, which requires breaking @@ -113,8 +119,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) */ static inline void tracehook_notify_signal(void) { - clear_thread_flag(TIF_NOTIFY_SIGNAL); - smp_mb__after_atomic(); + clear_notify_signal(); if (task_work_pending(current)) task_work_run(); } diff --git a/kernel/entry/common.c b/kernel/entry/common.c index f0b1daa1e8da..79eaf9b4b10d 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -139,15 +139,7 @@ void noinstr exit_to_user_mode(void) } /* Workaround to allow gradual conversion of architecture code */ -void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { } - -static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work) -{ - if (ti_work & _TIF_NOTIFY_SIGNAL) - tracehook_notify_signal(); - - arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING); -} +void __weak arch_do_signal_or_restart(struct pt_regs *regs) { } static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work) @@ -170,7 +162,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, klp_update_patch_state(current); if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) - handle_signal_work(regs, ti_work); + arch_do_signal_or_restart(regs); if (ti_work & _TIF_NOTIFY_RESUME) tracehook_notify_resume(regs); diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c index 96d476e06c77..cabf36a489e4 100644 --- a/kernel/entry/kvm.c +++ b/kernel/entry/kvm.c @@ -8,7 +8,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work) do { int ret; - if (ti_work & _TIF_NOTIFY_SIGNAL) + if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) tracehook_notify_signal(); if (ti_work & _TIF_SIGPENDING) { kvm_handle_signal_exit(vcpu); diff --git a/kernel/signal.c b/kernel/signal.c index 3b4cf25fb9b3..8632b88982c9 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2626,20 +2626,12 @@ bool get_signal(struct ksignal *ksig) struct signal_struct *signal = current->signal; int signr; + clear_notify_signal(); if (unlikely(task_work_pending(current))) task_work_run(); - /* - * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so - * that the arch handlers don't all have to do it. If we get here - * without TIF_SIGPENDING, just exit after running signal work. - */ - if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) { - if (test_thread_flag(TIF_NOTIFY_SIGNAL)) - tracehook_notify_signal(); - if (!task_sigpending(current)) - return false; - } + if (!task_sigpending(current)) + return false; if (unlikely(uprobe_deny_signal())) return false; -- cgit v1.2.3-71-gd317 From 7c5d8fa6fbb12a3f0eefe8762bfede508e147cb3 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 9 Feb 2022 11:18:54 -0600 Subject: task_work: Decouple TIF_NOTIFY_SIGNAL and task_work There are a small handful of reasons besides pending signals that the kernel might want to break out of interruptible sleeps. The flag TIF_NOTIFY_SIGNAL and the helpers that set and clear TIF_NOTIFY_SIGNAL provide the infrastructure for breaking out of interruptible sleeps and entering the return to user space slow path for those cases. Expand tracehook_notify_signal inline in its callers and remove it, which makes clear that TIF_NOTIFY_SIGNAL and task_work are separate concepts. Update the comment on set_notify_signal to more accurately describe its purpose. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-9-ebiederm@xmission.com Signed-off-by: "Eric W.
Biederman" --- fs/io-wq.c | 4 +++- fs/io_uring.c | 4 +++- include/linux/tracehook.h | 15 ++------------- kernel/entry/kvm.c | 7 +++++-- 4 files changed, 13 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/fs/io-wq.c b/fs/io-wq.c index bb7f161bb19c..8b9147873c2c 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -515,7 +515,9 @@ static bool io_flush_signals(void) { if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) { __set_current_state(TASK_RUNNING); - tracehook_notify_signal(); + clear_notify_signal(); + if (task_work_pending(current)) + task_work_run(); return true; } return false; diff --git a/fs/io_uring.c b/fs/io_uring.c index e85261079a78..d5fbae1030f9 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2592,7 +2592,9 @@ static inline bool io_run_task_work(void) { if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) { __set_current_state(TASK_RUNNING); - tracehook_notify_signal(); + clear_notify_signal(); + if (task_work_pending(current)) + task_work_run(); return true; } diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index b44a7820c468..e5d676e841e3 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -113,19 +113,8 @@ static inline void clear_notify_signal(void) } /* - * called by exit_to_user_mode_loop() if ti_work & _TIF_NOTIFY_SIGNAL. This - * is currently used by TWA_SIGNAL based task_work, which requires breaking - * wait loops to ensure that task_work is noticed and run. - */ -static inline void tracehook_notify_signal(void) -{ - clear_notify_signal(); - if (task_work_pending(current)) - task_work_run(); -} - -/* - * Called when we have work to process from exit_to_user_mode_loop() + * Called to break out of interruptible wait loops, and enter the + * exit_to_user_mode_loop(). */ static inline void set_notify_signal(struct task_struct *task) { diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c index cabf36a489e4..3ab5f98988c3 100644 --- a/kernel/entry/kvm.c +++ b/kernel/entry/kvm.c @@ -8,8 +8,11 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work) do { int ret; - if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) - tracehook_notify_signal(); + if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { + clear_notify_signal(); + if (task_work_pending(current)) + task_work_run(); + } if (ti_work & _TIF_SIGPENDING) { kvm_handle_signal_exit(vcpu); -- cgit v1.2.3-71-gd317 From 593febb143d17aa2096cd123c9d62b6981eb1d97 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 9 Feb 2022 11:46:54 -0600 Subject: signal: Move set_notify_signal and clear_notify_signal into sched/signal.h The header tracehook.h is no place for code to live. The functions set_notify_signal and clear_notify_signal are not about signals. They are about interruptions that act like signals. The fundamental signal primitives wind up calling set_notify_signal and clear_notify_signal. Which means they need to be maintained with the signal code. Since set_notify_signal and clear_notify_signal must be maintained with the signal subsystem move them into sched/signal.h and claim them as part of the signal subsystem. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-10-ebiederm@xmission.com Signed-off-by: "Eric W. 
Biederman" --- include/linux/sched/signal.h | 17 +++++++++++++++++ include/linux/tracehook.h | 17 ----------------- 2 files changed, 17 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index b6ecb9fc4cd2..3c8b34876744 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -349,6 +349,23 @@ extern void sigqueue_free(struct sigqueue *); extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type); extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); +static inline void clear_notify_signal(void) +{ + clear_thread_flag(TIF_NOTIFY_SIGNAL); + smp_mb__after_atomic(); +} + +/* + * Called to break out of interruptible wait loops, and enter the + * exit_to_user_mode_loop(). + */ +static inline void set_notify_signal(struct task_struct *task) +{ + if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && + !wake_up_state(task, TASK_INTERRUPTIBLE)) + kick_process(task); +} + static inline int restart_syscall(void) { set_tsk_thread_flag(current, TIF_SIGPENDING); diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index e5d676e841e3..1b7365aef8da 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -106,21 +106,4 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) rseq_handle_notify_resume(NULL, regs); } -static inline void clear_notify_signal(void) -{ - clear_thread_flag(TIF_NOTIFY_SIGNAL); - smp_mb__after_atomic(); -} - -/* - * Called to break out of interruptible wait loops, and enter the - * exit_to_user_mode_loop(). - */ -static inline void set_notify_signal(struct task_struct *task) -{ - if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && - !wake_up_state(task, TASK_INTERRUPTIBLE)) - kick_process(task); -} - #endif /* */ -- cgit v1.2.3-71-gd317 From d3c51a0c8944e3a5bba458358b4c1f9ae2de0133 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 28 Jan 2022 14:36:58 -0600 Subject: resume_user_mode: Remove #ifdef TIF_NOTIFY_RESUME in set_notify_resume Every architecture defines TIF_NOTIFY_RESUME so remove the unnecessary ifdef. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-11-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- include/linux/tracehook.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 1b7365aef8da..946404ebe10b 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -63,10 +63,8 @@ struct linux_binprm; */ static inline void set_notify_resume(struct task_struct *task) { -#ifdef TIF_NOTIFY_RESUME if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) kick_process(task); -#endif } /** -- cgit v1.2.3-71-gd317 From 03248addadf1a5ef0a03cbcd5ec905b49adb9658 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 9 Feb 2022 12:20:45 -0600 Subject: resume_user_mode: Move to resume_user_mode.h Move set_notify_resume and tracehook_notify_resume into resume_user_mode.h. While doing that rename tracehook_notify_resume to resume_user_mode_work. Update all of the places that included tracehook.h for these functions to include resume_user_mode.h instead. Update all of the callers of tracehook_notify_resume to call resume_user_mode_work. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-12-ebiederm@xmission.com Signed-off-by: "Eric W. 
Biederman" --- arch/Kconfig | 2 +- arch/alpha/kernel/signal.c | 4 +-- arch/arc/kernel/signal.c | 4 +-- arch/arm/kernel/signal.c | 4 +-- arch/arm64/kernel/signal.c | 4 +-- arch/csky/kernel/signal.c | 4 +-- arch/h8300/kernel/signal.c | 4 +-- arch/hexagon/kernel/process.c | 4 +-- arch/hexagon/kernel/signal.c | 1 - arch/ia64/kernel/process.c | 4 +-- arch/ia64/kernel/ptrace.c | 2 +- arch/ia64/kernel/signal.c | 1 - arch/m68k/kernel/signal.c | 4 +-- arch/microblaze/kernel/signal.c | 4 +-- arch/mips/kernel/signal.c | 4 +-- arch/nds32/kernel/signal.c | 4 +-- arch/nios2/kernel/signal.c | 4 +-- arch/openrisc/kernel/signal.c | 4 +-- arch/parisc/kernel/signal.c | 4 +-- arch/powerpc/kernel/signal.c | 4 +-- arch/riscv/kernel/signal.c | 4 +-- arch/sh/kernel/signal_32.c | 4 +-- arch/sparc/kernel/signal32.c | 1 - arch/sparc/kernel/signal_32.c | 4 +-- arch/sparc/kernel/signal_64.c | 4 +-- arch/um/kernel/process.c | 4 +-- arch/xtensa/kernel/signal.c | 4 +-- block/blk-cgroup.c | 2 +- include/linux/entry-kvm.h | 2 +- include/linux/resume_user_mode.h | 64 ++++++++++++++++++++++++++++++++++++++++ include/linux/tracehook.h | 51 -------------------------------- kernel/entry/common.c | 4 +-- kernel/entry/kvm.c | 2 +- kernel/task_work.c | 2 +- mm/memcontrol.c | 2 +- 35 files changed, 117 insertions(+), 107 deletions(-) create mode 100644 include/linux/resume_user_mode.h (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 6382520ef0a5..2e3979c3d66d 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -218,7 +218,7 @@ config TRACE_IRQFLAGS_SUPPORT # linux/regset.h user_regset interfaces # CORE_DUMP_USE_REGSET #define'd in linux/elf.h # TIF_SYSCALL_TRACE calls ptrace_report_syscall_{entry,exit} -# TIF_NOTIFY_RESUME calls tracehook_notify_resume() +# TIF_NOTIFY_RESUME calls resume_user_mode_work() # config HAVE_ARCH_TRACEHOOK bool diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index d8ed71d5bed3..6f47f256fe80 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include @@ -531,7 +531,7 @@ do_work_pending(struct pt_regs *regs, unsigned long thread_flags, do_signal(regs, r0, r19); r0 = 0; } else { - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } } local_irq_disable(); diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index cb2f88502baf..f748483628f2 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -49,7 +49,7 @@ #include #include #include -#include +#include #include #include @@ -438,5 +438,5 @@ void do_notify_resume(struct pt_regs *regs) * user mode */ if (test_thread_flag(TIF_NOTIFY_RESUME)) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index c532a6041066..459abc5d1819 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include @@ -627,7 +627,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) } else if (thread_flags & _TIF_UPROBE) { uprobe_notify_resume(regs); } else { - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } } local_irq_disable(); diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index d8aaf4b6f432..413c51de9d10 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include @@ -941,7 +941,7 @@ 
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) do_signal(regs); if (thread_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); if (thread_flags & _TIF_FOREIGN_FPSTATE) fpsimd_restore_current_state(); diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index c7b763d2f526..7a3149a27e4d 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include @@ -265,5 +265,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index 75a1c36b105a..0716fc8a8ce2 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -39,7 +39,7 @@ #include #include #include -#include +#include #include #include @@ -283,5 +283,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index 232dfd8956aa..ae3f728eeca0 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include /* * Program thread launch. Often defined as a macro in processor.h, @@ -178,7 +178,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags) } if (thread_info_flags & _TIF_NOTIFY_RESUME) { - tracehook_notify_resume(regs); + resume_user_mode_work(regs); return 1; } diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index 94cc7ff52dce..bcba31e9e0ae 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c @@ -7,7 +7,6 @@ #include #include -#include #include #include diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 834df24a88f1..d7a256bd9d6b 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include @@ -179,7 +179,7 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) if (test_thread_flag(TIF_NOTIFY_RESUME)) { local_irq_enable(); /* force interrupt enable */ - tracehook_notify_resume(&scr->pt); + resume_user_mode_work(&scr->pt); } /* copy user rbs to kernel rbs */ diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 6af64aae087d..a19acd9f5e1f 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index c1b299760bf7..51cf6a7ec158 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index 338817d0cb3f..49533f65958a 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -43,7 +43,7 @@ #include #include #include -#include +#include #include #include @@ -1109,5 +1109,5 @@ void do_notify_resume(struct pt_regs *regs) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/arch/microblaze/kernel/signal.c 
b/arch/microblaze/kernel/signal.c index 23e8a9336a29..561eb82d7af6 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include @@ -311,5 +311,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall) do_signal(regs, in_syscall); if (test_thread_flag(TIF_NOTIFY_RESUME)) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 5bce782e694c..1a99f26bf99f 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include @@ -916,7 +916,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); user_enter(); } diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c index 7e3ca430a223..551caef595cb 100644 --- a/arch/nds32/kernel/signal.c +++ b/arch/nds32/kernel/signal.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include @@ -380,5 +380,5 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags) do_signal(regs); if (thread_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index 2009ae2d3c3b..530b60c99545 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -319,7 +319,7 @@ asmlinkage int do_notify_resume(struct pt_regs *regs) return restart; } } else if (test_thread_flag(TIF_NOTIFY_RESUME)) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); return 0; } diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 92c5b70740f5..80f69740c731 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -309,7 +309,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) } syscall = 0; } else { - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } } local_irq_disable(); diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 46b1050640b8..2f7ebe9add20 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include @@ -602,5 +602,5 @@ void do_notify_resume(struct pt_regs *regs, long in_syscall) do_signal(regs, in_syscall); if (test_thread_flag(TIF_NOTIFY_RESUME)) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index b93b87df499d..f7f8620663c7 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -9,7 +9,7 @@ * this archive for more details. 
*/ -#include +#include #include #include #include @@ -294,7 +294,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) } if (thread_info_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } static unsigned long get_tm_stackpointer(struct task_struct *tsk) diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index c2d5ecbe5526..d80bf5896c6f 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include @@ -317,5 +317,5 @@ asmlinkage __visible void do_notify_resume(struct pt_regs *regs, do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index dd3092911efa..90f495d35db2 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include @@ -503,5 +503,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, do_signal(regs, save_r0); if (thread_info_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 6cc124a3bb98..f9fe502b81c6 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index ffab16369bea..80c89b362d8b 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -19,7 +19,7 @@ #include #include /* do_coredum */ #include -#include +#include #include #include @@ -524,7 +524,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr, diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 2a78d2af1265..8b9fc76cd3e0 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -552,7 +552,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); user_enter(); } diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 4a420778ed87..80504680be08 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include @@ -104,7 +104,7 @@ void interrupt_end(void) test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } int get_current_pid(void) diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index f6c949895b3e..6f68649e86ba 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include @@ -511,5 +511,5 @@ 
void do_notify_resume(struct pt_regs *regs) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 650f7e27989f..4d8be1634bc6 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include "blk.h" diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h index 07c878d6e323..6813171afccb 100644 --- a/include/linux/entry-kvm.h +++ b/include/linux/entry-kvm.h @@ -3,7 +3,7 @@ #define __LINUX_ENTRYKVM_H #include -#include +#include #include #include #include diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h new file mode 100644 index 000000000000..285189454449 --- /dev/null +++ b/include/linux/resume_user_mode.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef LINUX_RESUME_USER_MODE_H +#define LINUX_RESUME_USER_MODE_H + +#include +#include +#include +#include + +/** + * set_notify_resume - cause resume_user_mode_work() to be called + * @task: task that will call resume_user_mode_work() + * + * Calling this arranges that @task will call resume_user_mode_work() + * before returning to user mode. If it's already running in user mode, + * it will enter the kernel and call resume_user_mode_work() soon. + * If it's blocked, it will not be woken. + */ +static inline void set_notify_resume(struct task_struct *task) +{ + if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) + kick_process(task); +} + + +/** + * resume_user_mode_work - Perform work before returning to user mode + * @regs: user-mode registers of @current task + * + * This is called when %TIF_NOTIFY_RESUME has been set. Now we are + * about to return to user mode, and the user state in @regs can be + * inspected or adjusted. The caller in arch code has cleared + * %TIF_NOTIFY_RESUME before the call. If the flag gets set again + * asynchronously, this will be called again before we return to + * user mode. + * + * Called without locks. + */ +static inline void resume_user_mode_work(struct pt_regs *regs) +{ + clear_thread_flag(TIF_NOTIFY_RESUME); + /* + * This barrier pairs with task_work_add()->set_notify_resume() after + * hlist_add_head(task->task_works); + */ + smp_mb__after_atomic(); + if (unlikely(task_work_pending(current))) + task_work_run(); + +#ifdef CONFIG_KEYS_REQUEST_CACHE + if (unlikely(current->cached_requested_key)) { + key_put(current->cached_requested_key); + current->cached_requested_key = NULL; + } +#endif + + mem_cgroup_handle_over_high(); + blkcg_maybe_throttle_current(); + + rseq_handle_notify_resume(NULL, regs); +} + +#endif /* LINUX_RESUME_USER_MODE_H */ diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 946404ebe10b..9f6b3fd1880a 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -52,56 +52,5 @@ struct linux_binprm; -/** - * set_notify_resume - cause tracehook_notify_resume() to be called - * @task: task that will call tracehook_notify_resume() - * - * Calling this arranges that @task will call tracehook_notify_resume() - * before returning to user mode. If it's already running in user mode, - * it will enter the kernel and call tracehook_notify_resume() soon. - * If it's blocked, it will not be woken. 
- */ -static inline void set_notify_resume(struct task_struct *task) -{ - if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) - kick_process(task); -} - -/** - * tracehook_notify_resume - report when about to return to user mode - * @regs: user-mode registers of @current task - * - * This is called when %TIF_NOTIFY_RESUME has been set. Now we are - * about to return to user mode, and the user state in @regs can be - * inspected or adjusted. The caller in arch code has cleared - * %TIF_NOTIFY_RESUME before the call. If the flag gets set again - * asynchronously, this will be called again before we return to - * user mode. - * - * Called without locks. - */ -static inline void tracehook_notify_resume(struct pt_regs *regs) -{ - clear_thread_flag(TIF_NOTIFY_RESUME); - /* - * This barrier pairs with task_work_add()->set_notify_resume() after - * hlist_add_head(task->task_works); - */ - smp_mb__after_atomic(); - if (unlikely(task_work_pending(current))) - task_work_run(); - -#ifdef CONFIG_KEYS_REQUEST_CACHE - if (unlikely(current->cached_requested_key)) { - key_put(current->cached_requested_key); - current->cached_requested_key = NULL; - } -#endif - - mem_cgroup_handle_over_high(); - blkcg_maybe_throttle_current(); - - rseq_handle_notify_resume(NULL, regs); -} #endif /* */ diff --git a/kernel/entry/common.c b/kernel/entry/common.c index 79eaf9b4b10d..a86823cad853 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include #include @@ -165,7 +165,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, arch_do_signal_or_restart(regs); if (ti_work & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(regs); + resume_user_mode_work(regs); /* Architecture specific TIF work */ arch_exit_to_user_mode_work(regs, ti_work); diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c index 3ab5f98988c3..9d09f489b60e 100644 --- a/kernel/entry/kvm.c +++ b/kernel/entry/kvm.c @@ -23,7 +23,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work) schedule(); if (ti_work & _TIF_NOTIFY_RESUME) - tracehook_notify_resume(NULL); + resume_user_mode_work(NULL); ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work); if (ret) diff --git a/kernel/task_work.c b/kernel/task_work.c index cc6fccb0e24d..c59e1a49bc40 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include +#include static struct callback_head work_exited; /* all we need is ->next == NULL */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 09d342c7cbd0..2aaa400f34d6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -59,7 +59,7 @@ #include #include #include -#include +#include #include #include #include "internal.h" -- cgit v1.2.3-71-gd317 From 355f841a3f8ca980c9682937a5257d3a1f6fc09d Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 9 Feb 2022 12:47:08 -0600 Subject: tracehook: Remove tracehook.h Now that all of the definitions have moved out of tracehook.h into ptrace.h, sched/signal.h, resume_user_mode.h there is nothing left in tracehook.h so remove it. Update the few files that were depending upon tracehook.h to bring in definitions to use the headers they need directly. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-13-ebiederm@xmission.com Signed-off-by: "Eric W. 
Biederman" --- MAINTAINERS | 1 - arch/s390/include/asm/entry-common.h | 1 - arch/s390/kernel/ptrace.c | 1 - arch/s390/kernel/signal.c | 1 - arch/x86/kernel/ptrace.c | 1 - arch/x86/kernel/signal.c | 1 - fs/coredump.c | 1 - fs/exec.c | 1 - fs/io-wq.c | 2 +- fs/io_uring.c | 1 - fs/proc/array.c | 1 - fs/proc/base.c | 1 - include/linux/tracehook.h | 56 ------------------------------------ kernel/exit.c | 3 +- kernel/livepatch/transition.c | 1 - kernel/seccomp.c | 1 - kernel/signal.c | 2 +- security/apparmor/domain.c | 1 - security/selinux/hooks.c | 1 - 19 files changed, 4 insertions(+), 74 deletions(-) delete mode 100644 include/linux/tracehook.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index ea3e6c914384..2f16a23a26a2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15623,7 +15623,6 @@ F: arch/*/ptrace*.c F: include/asm-generic/syscall.h F: include/linux/ptrace.h F: include/linux/regset.h -F: include/linux/tracehook.h F: include/uapi/linux/ptrace.h F: include/uapi/linux/ptrace.h F: kernel/ptrace.c diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h index 17aead80aadb..eabab24b71dd 100644 --- a/arch/s390/include/asm/entry-common.h +++ b/arch/s390/include/asm/entry-common.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 0ea3d02b378d..641fa36f6101 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index ea9e5e8182cd..8b7b5f80f722 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 6d2244c94799..419768d7605e 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index de3d5b5724d8..e439eb14325f 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/coredump.c b/fs/coredump.c index 1c060c0a2d72..f54c5e316df3 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/exec.c b/fs/exec.c index 79f2c9483302..e23e2d430485 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -56,7 +56,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/io-wq.c b/fs/io-wq.c index 8b9147873c2c..cb3cb1833ef6 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include diff --git a/fs/io_uring.c b/fs/io_uring.c index d5fbae1030f9..6c7eacc0ebd6 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -78,7 +78,6 @@ #include #include #include -#include #include #include diff --git a/fs/proc/array.c b/fs/proc/array.c index fd8b0c12b2cb..eb815759842c 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -88,7 +88,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/proc/base.c b/fs/proc/base.c index d654ce7150fd..01fb37ecc89f 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -74,7 +74,6 @@ #include #include #include -#include #include #include #include diff --git 
a/include/linux/tracehook.h b/include/linux/tracehook.h deleted file mode 100644 index 9f6b3fd1880a..000000000000 --- a/include/linux/tracehook.h +++ /dev/null @@ -1,56 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Tracing hooks - * - * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. - * - * This file defines hook entry points called by core code where - * user tracing/debugging support might need to do something. These - * entry points are called tracehook_*(). Each hook declared below - * has a detailed kerneldoc comment giving the context (locking et - * al) from which it is called, and the meaning of its return value. - * - * Each function here typically has only one call site, so it is ok - * to have some nontrivial tracehook_*() inlines. In all cases, the - * fast path when no tracing is enabled should be very short. - * - * The purpose of this file and the tracehook_* layer is to consolidate - * the interface that the kernel core and arch code uses to enable any - * user debugging or tracing facility (such as ptrace). The interfaces - * here are carefully documented so that maintainers of core and arch - * code do not need to think about the implementation details of the - * tracing facilities. Likewise, maintainers of the tracing code do not - * need to understand all the calling core or arch code in detail, just - * documented circumstances of each call, such as locking conditions. - * - * If the calling core code changes so that locking is different, then - * it is ok to change the interface documented here. The maintainer of - * core code changing should notify the maintainers of the tracing code - * that they need to work out the change. - * - * Some tracehook_*() inlines take arguments that the current tracing - * implementations might not necessarily use. These function signatures - * are chosen to pass in all the information that is on hand in the - * caller and might conceivably be relevant to a tracer, so that the - * core code won't have to be updated when tracing adds more features. - * If a call site changes so that some of those parameters are no longer - * already on hand without extra work, then the tracehook_* interface - * can change so there is no make-work burden on the core code. The - * maintainer of core code changing should notify the maintainers of the - * tracing code that they need to work out the change. 
- */ - -#ifndef _LINUX_TRACEHOOK_H -#define _LINUX_TRACEHOOK_H 1 - -#include -#include -#include -#include -#include -#include -struct linux_binprm; - - - -#endif /* */ diff --git a/kernel/exit.c b/kernel/exit.c index b00a25bb4ab9..9326d1f97fc7 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -49,7 +49,8 @@ #include /* for audit_free() */ #include #include -#include +#include +#include #include #include #include diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index 5683ac0d2566..df808d97d84f 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -9,7 +9,6 @@ #include #include -#include #include "core.h" #include "patch.h" #include "transition.h" diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 4d8f44a17727..63198086ee83 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include diff --git a/kernel/signal.c b/kernel/signal.c index 8632b88982c9..c2dee5420567 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 583680f6cd81..a29e69d2c300 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 5b6895e4fc29..4d2cd6b9f6fc 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3-71-gd317 From 6789ab9668d920d7be636cb994d85be9a816f9d5 Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Thu, 10 Mar 2022 13:16:55 -0800 Subject: compiler_types: Refactor the use of btf_type_tag attribute. Previous patches have introduced the compiler attribute btf_type_tag for __user and __percpu. The availability of this attribute depends on some CONFIGs and compiler support. This patch refactors the use of btf_type_tag by introducing BTF_TYPE_TAG, which hides all the dependencies. No functional change. 
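The pattern is easy to demonstrate in isolation. A hedged, standalone sketch follows (not kernel code: the kernel version in the hunk below additionally gates on CONFIG_DEBUG_INFO_BTF and CONFIG_PAHOLE_HAS_BTF_TAG, and clang 14 or later is assumed for the attribute; the variable names are illustrative):

#ifndef __has_attribute
# define __has_attribute(x) 0	/* fallback for compilers without the probe */
#endif

#if __has_attribute(btf_type_tag)
# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
#else
# define BTF_TYPE_TAG(value) /* nothing */
#endif

/* Every user now expands to either the tag or nothing: */
#define __user   BTF_TYPE_TAG(user)
#define __percpu BTF_TYPE_TAG(percpu)

static int __user *uptr;	/* carries btf_type_tag("user") when supported */
static long __percpu *pcptr;	/* carries btf_type_tag("percpu") likewise */

int main(void)
{
	(void)uptr;
	(void)pcptr;
	return 0;
}

Compilers without the attribute simply see untagged pointers, which is why a single unconditional definition per user suffices once the probing is centralized in BTF_TYPE_TAG().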
Suggested-by: Andrii Nakryiko Signed-off-by: Hao Luo Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220310211655.3173786-1-haoluo@google.com --- include/linux/compiler_types.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index b9a8ae9440c7..1bc760ba400c 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -4,6 +4,13 @@ #ifndef __ASSEMBLY__ +#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ + __has_attribute(btf_type_tag) +# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value))) +#else +# define BTF_TYPE_TAG(value) /* nothing */ +#endif + #ifdef __CHECKER__ /* address spaces */ # define __kernel __attribute__((address_space(0))) @@ -31,19 +38,11 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __kernel # ifdef STRUCTLEAK_PLUGIN # define __user __attribute__((user)) -# elif defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ - __has_attribute(btf_type_tag) -# define __user __attribute__((btf_type_tag("user"))) # else -# define __user +# define __user BTF_TYPE_TAG(user) # endif # define __iomem -# if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ - __has_attribute(btf_type_tag) -# define __percpu __attribute__((btf_type_tag("percpu"))) -# else -# define __percpu -# endif +# define __percpu BTF_TYPE_TAG(percpu) # define __rcu # define __chk_user_ptr(x) (void)0 # define __chk_io_ptr(x) (void)0 -- cgit v1.2.3-71-gd317 From 271907ee2f29cd1078fd219f0778fd824fb1971c Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 17 Jan 2022 15:14:44 +0200 Subject: net/mlx5: Query the maximum MCIA register read size from firmware The MCIA register supports either 12 or 32 dwords, use the correct value by querying the capability from the MCAM register. Signed-off-by: Gal Pressman Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/port.c | 8 +++++++- include/linux/mlx5/mlx5_ifc.h | 4 +++- include/linux/mlx5/port.h | 1 - 3 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 289b29a23418..418ab777f6e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -365,6 +365,12 @@ static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset *offset -= MLX5_EEPROM_PAGE_LENGTH; } +static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev) +{ + /* mcia supports either 12 dwords or 32 dwords */ + return (MLX5_CAP_MCAM_FEATURE(dev, mcia_32dwords) ? 
32 : 12) * sizeof(u32); +} + static int mlx5_query_mcia(struct mlx5_core_dev *dev, struct mlx5_module_eeprom_query_params *params, u8 *data) { @@ -374,7 +380,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev, void *ptr; u16 size; - size = min_t(int, params->size, MLX5_EEPROM_MAX_BYTES); + size = min_t(int, params->size, mlx5_mcia_max_bytes(dev)); MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, size, size); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 318fae4b3560..745107ff681d 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9691,7 +9691,9 @@ struct mlx5_ifc_pcam_reg_bits { }; struct mlx5_ifc_mcam_enhanced_features_bits { - u8 reserved_at_0[0x6a]; + u8 reserved_at_0[0x5d]; + u8 mcia_32dwords[0x1]; + u8 reserved_at_5e[0xc]; u8 reset_state[0x1]; u8 ptpcyc2realtime_modify[0x1]; u8 reserved_at_6c[0x2]; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 77ea4f9c5265..402413b3e914 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -56,7 +56,6 @@ enum mlx5_an_status { MLX5_AN_LINK_DOWN = 4, }; -#define MLX5_EEPROM_MAX_BYTES 32 #define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff #define MLX5_I2C_ADDR_LOW 0x50 #define MLX5_I2C_ADDR_HIGH 0x51 -- cgit v1.2.3-71-gd317 From fcb610a86c53dfcfbb2aa62e704481112752f367 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 17 Jan 2022 15:53:06 +0200 Subject: net/mlx5: Parse module mapping using mlx5_ifc The assumption that the first byte in the module mapping dword is the module number shouldn't be hard-coded in the driver, but come from mlx5_ifc structs. While at it, fix the incorrect width for the 'rx_lane' and 'tx_lane' fields. Signed-off-by: Gal Pressman Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/port.c | 6 +++--- include/linux/mlx5/mlx5_ifc.h | 8 ++++---- include/linux/mlx5/port.h | 1 - 3 files changed, 7 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 418ab777f6e8..493cacb4610b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -275,7 +275,6 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) { u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmlp_reg)]; - int module_mapping; int err; MLX5_SET(pmlp_reg, in, local_port, 1); @@ -284,8 +283,9 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) if (err) return err; - module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping); - *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK; + *module_num = MLX5_GET(lane_2_module_mapping, + MLX5_ADDR_OF(pmlp_reg, out, lane0_module_mapping), + module); return 0; } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 745107ff681d..91b7f730ed91 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9888,10 +9888,10 @@ struct mlx5_ifc_pcmr_reg_bits { }; struct mlx5_ifc_lane_2_module_mapping_bits { - u8 reserved_at_0[0x6]; - u8 rx_lane[0x2]; - u8 reserved_at_8[0x6]; - u8 tx_lane[0x2]; + u8 reserved_at_0[0x4]; + u8 rx_lane[0x4]; + u8 reserved_at_8[0x4]; + u8 tx_lane[0x4]; u8 reserved_at_10[0x8]; u8 module[0x8]; }; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 402413b3e914..28a928b0684b 100644 --- a/include/linux/mlx5/port.h +++ 
b/include/linux/mlx5/port.h @@ -56,7 +56,6 @@ enum mlx5_an_status { MLX5_AN_LINK_DOWN = 4, }; -#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff #define MLX5_I2C_ADDR_LOW 0x50 #define MLX5_I2C_ADDR_HIGH 0x51 #define MLX5_EEPROM_PAGE_LENGTH 256 -- cgit v1.2.3-71-gd317 From 286f950545e0d9c8aa802cbfc9676860bbc49179 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 16 Feb 2022 15:21:58 +0530 Subject: coresight: Drop unused 'none' enum value for each component CORESIGHT_DEV_TYPE_NONE/CORESIGHT_DEV_SUBTYPE_XXXX_NONE values are not used any where. Actual enumeration can start from 0. Just drop these unused enum values. Cc: Mathieu Poirier Cc: Suzuki K Poulose Cc: Mike Leach Cc: Leo Yan Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/1645005118-10561-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Mathieu Poirier Signed-off-by: Suzuki K Poulose --- drivers/hwtracing/coresight/coresight-core.c | 3 --- include/linux/coresight.h | 5 ----- 2 files changed, 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 88653d1c06a4..af00dca8d1ac 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1278,9 +1278,6 @@ static struct attribute *coresight_source_attrs[] = { ATTRIBUTE_GROUPS(coresight_source); static struct device_type coresight_dev_type[] = { - { - .name = "none", - }, { .name = "sink", .groups = coresight_sink_groups, diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 93a2922b7653..9f445f09fcfe 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -36,7 +36,6 @@ extern struct bus_type coresight_bustype; enum coresight_dev_type { - CORESIGHT_DEV_TYPE_NONE, CORESIGHT_DEV_TYPE_SINK, CORESIGHT_DEV_TYPE_LINK, CORESIGHT_DEV_TYPE_LINKSINK, @@ -46,7 +45,6 @@ enum coresight_dev_type { }; enum coresight_dev_subtype_sink { - CORESIGHT_DEV_SUBTYPE_SINK_NONE, CORESIGHT_DEV_SUBTYPE_SINK_PORT, CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM, @@ -54,21 +52,18 @@ enum coresight_dev_subtype_sink { }; enum coresight_dev_subtype_link { - CORESIGHT_DEV_SUBTYPE_LINK_NONE, CORESIGHT_DEV_SUBTYPE_LINK_MERG, CORESIGHT_DEV_SUBTYPE_LINK_SPLIT, CORESIGHT_DEV_SUBTYPE_LINK_FIFO, }; enum coresight_dev_subtype_source { - CORESIGHT_DEV_SUBTYPE_SOURCE_NONE, CORESIGHT_DEV_SUBTYPE_SOURCE_PROC, CORESIGHT_DEV_SUBTYPE_SOURCE_BUS, CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE, }; enum coresight_dev_subtype_helper { - CORESIGHT_DEV_SUBTYPE_HELPER_NONE, CORESIGHT_DEV_SUBTYPE_HELPER_CATU, }; -- cgit v1.2.3-71-gd317 From 3a73333fb370f7b65de9d94c53df503642bda789 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Thu, 3 Mar 2022 17:05:34 -0500 Subject: tracing: Add TRACE_CUSTOM_EVENT() macro To make it really easy to add custom events from modules, add a TRACE_CUSTOM_EVENT() macro that acts just like the TRACE_EVENT() macro, but creates a custom event to an already existing tracepoint. The trace_custom_sched.[ch] has been updated to use this new macro to show how simple it is. 
Link: https://lkml.kernel.org/r/20220303220625.738622494@goodmis.org Cc: Ingo Molnar Cc: Andrew Morton Cc: Joel Fernandes Cc: Peter Zijlstra Cc: Masami Hiramatsu Cc: Tom Zanussi Signed-off-by: Steven Rostedt (Google) --- include/linux/trace_events.h | 24 ++- include/trace/define_custom_trace.h | 77 +++++++++ include/trace/trace_custom_events.h | 221 +++++++++++++++++++++++++ samples/trace_events/Makefile | 2 +- samples/trace_events/trace_custom_sched.c | 259 +++--------------------------- samples/trace_events/trace_custom_sched.h | 95 +++++++++++ 6 files changed, 441 insertions(+), 237 deletions(-) create mode 100644 include/trace/define_custom_trace.h create mode 100644 include/trace/trace_custom_events.h create mode 100644 samples/trace_events/trace_custom_sched.h (limited to 'include/linux') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 70c069aef02c..9b09fd633d48 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -315,6 +315,7 @@ enum { TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, TRACE_EVENT_FL_EPROBE_BIT, + TRACE_EVENT_FL_CUSTOM_BIT, }; /* @@ -328,6 +329,9 @@ enum { * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe * EPROBE - Event is an event probe + * CUSTOM - Event is a custom event (to be attached to an exsiting tracepoint) + * This is set when the custom event has not been attached + * to a tracepoint yet, then it is cleared when it is. */ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), @@ -339,6 +343,7 @@ enum { TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT), + TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT), }; #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE) @@ -440,7 +445,9 @@ static inline bool bpf_prog_array_valid(struct trace_event_call *call) static inline const char * trace_event_name(struct trace_event_call *call) { - if (call->flags & TRACE_EVENT_FL_TRACEPOINT) + if (call->flags & TRACE_EVENT_FL_CUSTOM) + return call->name; + else if (call->flags & TRACE_EVENT_FL_TRACEPOINT) return call->tp ? call->tp->name : NULL; else return call->name; @@ -901,3 +908,18 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, #endif #endif /* _LINUX_TRACE_EVENT_H */ + +/* + * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection. + * This is due to the way trace custom events work. If a file includes two + * trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include + * will override the TRACE_CUSTOM_EVENT and break the second include. + */ + +#ifndef TRACE_CUSTOM_EVENT + +#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) +#define DEFINE_CUSTOM_EVENT(template, name, proto, args) +#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print) + +#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */ diff --git a/include/trace/define_custom_trace.h b/include/trace/define_custom_trace.h new file mode 100644 index 000000000000..5827a4c92c74 --- /dev/null +++ b/include/trace/define_custom_trace.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Trace files that want to automate creation of all tracepoints defined + * in their file should include this file. 
The following are macros that the + * trace file may define: + * + * TRACE_SYSTEM defines the system the tracepoint is for + * + * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h + * This macro may be defined to tell define_trace.h what file to include. + * Note, leave off the ".h". + * + * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace + * then this macro can define the path to use. Note, the path is relative to + * define_trace.h, not the file including it. Full path names for out of tree + * modules must be used. + */ + +#ifdef CREATE_CUSTOM_TRACE_EVENTS + +/* Prevent recursion */ +#undef CREATE_CUSTOM_TRACE_EVENTS + +#include + +#undef TRACE_CUSTOM_EVENT +#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print) + +#undef DEFINE_CUSTOM_EVENT +#define DEFINE_CUSTOM_EVENT(template, name, proto, args) + +#undef TRACE_INCLUDE +#undef __TRACE_INCLUDE + +#ifndef TRACE_INCLUDE_FILE +# define TRACE_INCLUDE_FILE TRACE_SYSTEM +# define UNDEF_TRACE_INCLUDE_FILE +#endif + +#ifndef TRACE_INCLUDE_PATH +# define __TRACE_INCLUDE(system) +# define UNDEF_TRACE_INCLUDE_PATH +#else +# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h) +#endif + +# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system) + +/* Let the trace headers be reread */ +#define TRACE_CUSTOM_MULTI_READ + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +#ifdef TRACEPOINTS_ENABLED +#include +#endif + +#undef TRACE_CUSTOM_EVENT +#undef DECLARE_CUSTOM_EVENT_CLASS +#undef DEFINE_CUSTOM_EVENT +#undef TRACE_CUSTOM_MULTI_READ + +/* Only undef what we defined in this file */ +#ifdef UNDEF_TRACE_INCLUDE_FILE +# undef TRACE_INCLUDE_FILE +# undef UNDEF_TRACE_INCLUDE_FILE +#endif + +#ifdef UNDEF_TRACE_INCLUDE_PATH +# undef TRACE_INCLUDE_PATH +# undef UNDEF_TRACE_INCLUDE_PATH +#endif + +/* We may be processing more files */ +#define CREATE_CUSTOM_TRACE_POINTS + +#endif /* CREATE_CUSTOM_TRACE_POINTS */ diff --git a/include/trace/trace_custom_events.h b/include/trace/trace_custom_events.h new file mode 100644 index 000000000000..b567c7202339 --- /dev/null +++ b/include/trace/trace_custom_events.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is similar to the trace_events.h file, but is to only + * create custom trace events to be attached to existing tracepoints. + * Where as the TRACE_EVENT() macro (from trace_events.h) will create + * both the trace event and the tracepoint it will attach the event to, + * TRACE_CUSTOM_EVENT() is to create only a custom version of an existing + * trace event (created by TRACE_EVENT() or DEFINE_EVENT()), and will + * be placed in the "custom" system. 
+ */ + +#include + +/* All custom events are placed in the custom group */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM custom + +#ifndef TRACE_SYSTEM_VAR +#define TRACE_SYSTEM_VAR TRACE_SYSTEM +#endif + +/* The init stage creates the system string and enum mappings */ + +#include "stages/init.h" + +#undef TRACE_CUSTOM_EVENT +#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print) \ + DECLARE_CUSTOM_EVENT_CLASS(name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(tstruct), \ + PARAMS(assign), \ + PARAMS(print)); \ + DEFINE_CUSTOM_EVENT(name, name, PARAMS(proto), PARAMS(args)); + +/* Stage 1 creates the structure of the recorded event layout */ + +#include "stages/stage1_defines.h" + +#undef DECLARE_CUSTOM_EVENT_CLASS +#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ + struct trace_custom_event_raw_##name { \ + struct trace_entry ent; \ + tstruct \ + char __data[]; \ + }; \ + \ + static struct trace_event_class custom_event_class_##name; + +#undef DEFINE_CUSTOM_EVENT +#define DEFINE_CUSTOM_EVENT(template, name, proto, args) \ + static struct trace_event_call __used \ + __attribute__((__aligned__(4))) custom_event_##name + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* Stage 2 creates the custom class */ + +#include "stages/stage2_defines.h" + +#undef DECLARE_CUSTOM_EVENT_CLASS +#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ + struct trace_custom_event_data_offsets_##call { \ + tstruct; \ + }; + +#undef DEFINE_CUSTOM_EVENT +#define DEFINE_CUSTOM_EVENT(template, name, proto, args) + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* Stage 3 create the way to print the custom event */ + +#include "stages/stage3_defines.h" + +#undef DECLARE_CUSTOM_EVENT_CLASS +#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static notrace enum print_line_t \ +trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \ + struct trace_event *trace_event) \ +{ \ + struct trace_seq *s = &iter->seq; \ + struct trace_seq __maybe_unused *p = &iter->tmp_seq; \ + struct trace_custom_event_raw_##call *field; \ + int ret; \ + \ + field = (typeof(field))iter->ent; \ + \ + ret = trace_raw_output_prep(iter, trace_event); \ + if (ret != TRACE_TYPE_HANDLED) \ + return ret; \ + \ + trace_event_printf(iter, print); \ + \ + return trace_handle_return(s); \ +} \ +static struct trace_event_functions trace_custom_event_type_funcs_##call = { \ + .trace = trace_custom_raw_output_##call, \ +}; + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* Stage 4 creates the offset layout for the fields */ + +#include "stages/stage4_defines.h" + +#undef DECLARE_CUSTOM_EVENT_CLASS +#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print) \ +static struct trace_event_fields trace_custom_event_fields_##call[] = { \ + tstruct \ + {} }; + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* Stage 5 creates the helper function for dynamic fields */ + +#include "stages/stage5_defines.h" + +#undef DECLARE_CUSTOM_EVENT_CLASS +#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static inline notrace int trace_custom_event_get_offsets_##call( \ + struct trace_custom_event_data_offsets_##call *__data_offsets, proto) \ +{ \ + int __data_size = 0; \ + int __maybe_unused __item_length; \ + struct trace_custom_event_raw_##call __maybe_unused *entry; \ + \ + tstruct; \ + \ + return __data_size; \ +} + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* Stage 6 creates the probe function that 
records the event */ + +#include "stages/stage6_defines.h" + +#undef DECLARE_CUSTOM_EVENT_CLASS +#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ + \ +static notrace void \ +trace_custom_event_raw_event_##call(void *__data, proto) \ +{ \ + struct trace_event_file *trace_file = __data; \ + struct trace_custom_event_data_offsets_##call __maybe_unused __data_offsets; \ + struct trace_event_buffer fbuffer; \ + struct trace_custom_event_raw_##call *entry; \ + int __data_size; \ + \ + if (trace_trigger_soft_disabled(trace_file)) \ + return; \ + \ + __data_size = trace_custom_event_get_offsets_##call(&__data_offsets, args); \ + \ + entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ + sizeof(*entry) + __data_size); \ + \ + if (!entry) \ + return; \ + \ + tstruct \ + \ + { assign; } \ + \ + trace_event_buffer_commit(&fbuffer); \ +} +/* + * The ftrace_test_custom_probe is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the ftrace probe will + * fail to compile unless it too is updated. + */ + +#undef DEFINE_CUSTOM_EVENT +#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \ +static inline void ftrace_test_custom_probe_##call(void) \ +{ \ + check_trace_callback_type_##call(trace_custom_event_raw_event_##template); \ +} + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* Stage 7 creates the actual class and event structure for the custom event */ + +#include "stages/stage7_defines.h" + +#undef DECLARE_CUSTOM_EVENT_CLASS +#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static char custom_print_fmt_##call[] = print; \ +static struct trace_event_class __used __refdata custom_event_class_##call = { \ + .system = TRACE_SYSTEM_STRING, \ + .fields_array = trace_custom_event_fields_##call, \ + .fields = LIST_HEAD_INIT(custom_event_class_##call.fields),\ + .raw_init = trace_event_raw_init, \ + .probe = trace_custom_event_raw_event_##call, \ + .reg = trace_event_reg, \ +}; + +#undef DEFINE_CUSTOM_EVENT +#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \ + \ +static struct trace_event_call __used custom_event_##call = { \ + .name = #call, \ + .class = &custom_event_class_##template, \ + .event.funcs = &trace_custom_event_type_funcs_##template, \ + .print_fmt = custom_print_fmt_##template, \ + .flags = TRACE_EVENT_FL_CUSTOM, \ +}; \ +static inline int trace_custom_event_##call##_update(struct tracepoint *tp) \ +{ \ + if (tp->name && strcmp(tp->name, #call) == 0) { \ + custom_event_##call.tp = tp; \ + custom_event_##call.flags = TRACE_EVENT_FL_TRACEPOINT; \ + return 1; \ + } \ + return 0; \ +} \ +static struct trace_event_call __used \ +__section("_ftrace_events") *__custom_event_##call = &custom_event_##call + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) diff --git a/samples/trace_events/Makefile b/samples/trace_events/Makefile index e98afc447fe1..b3808bb4cf8b 100644 --- a/samples/trace_events/Makefile +++ b/samples/trace_events/Makefile @@ -11,7 +11,7 @@ # Here trace-events-sample.c does the CREATE_TRACE_POINTS. 
# CFLAGS_trace-events-sample.o := -I$(src) +CFLAGS_trace_custom_sched.o := -I$(src) obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace-events-sample.o - obj-$(CONFIG_SAMPLE_TRACE_CUSTOM_EVENTS) += trace_custom_sched.o diff --git a/samples/trace_events/trace_custom_sched.c b/samples/trace_events/trace_custom_sched.c index 70a12c32ff99..b99d9ab7db85 100644 --- a/samples/trace_events/trace_custom_sched.c +++ b/samples/trace_events/trace_custom_sched.c @@ -11,256 +11,45 @@ #include <linux/trace_events.h> #include <linux/version.h> #include <linux/module.h> -#include <trace/events/sched.h> - -#define THIS_SYSTEM "custom_sched" - -#define SCHED_PRINT_FMT \ - C("prev_prio=%d next_pid=%d next_prio=%d", REC->prev_prio, REC->next_pid, \ - REC->next_prio) - -#define SCHED_WAKING_FMT \ - C("pid=%d prio=%d", REC->pid, REC->prio) - -#undef C -#define C(a, b...) a, b - -static struct trace_event_fields sched_switch_fields[] = { - { - .type = "unsigned short", - .name = "prev_prio", - .size = sizeof(short), - .align = __alignof__(short), - .is_signed = 0, - .filter_type = FILTER_OTHER, - }, - { - .type = "unsigned short", - .name = "next_prio", - .size = sizeof(short), - .align = __alignof__(short), - .is_signed = 0, - .filter_type = FILTER_OTHER, - }, - { - .type = "unsigned int", - .name = "next_pid", - .size = sizeof(int), - .align = __alignof__(int), - .is_signed = 0, - .filter_type = FILTER_OTHER, - }, - {} -}; - -struct sched_event { - struct trace_entry ent; - unsigned short prev_prio; - unsigned short next_prio; - unsigned int next_pid; -}; - -static struct trace_event_fields sched_waking_fields[] = { - { - .type = "unsigned int", - .name = "pid", - .size = sizeof(int), - .align = __alignof__(int), - .is_signed = 0, - .filter_type = FILTER_OTHER, - }, - { - .type = "unsigned short", - .name = "prio", - .size = sizeof(short), - .align = __alignof__(short), - .is_signed = 0, - .filter_type = FILTER_OTHER, - }, - {} -}; - -struct wake_event { - struct trace_entry ent; - unsigned int pid; - unsigned short prio; -}; - -static void sched_switch_probe(void *data, bool preempt, struct task_struct *prev, - struct task_struct *next) -{ - struct trace_event_file *trace_file = data; - struct trace_event_buffer fbuffer; - struct sched_event *entry; - - if (trace_trigger_soft_disabled(trace_file)) - return; - - entry = trace_event_buffer_reserve(&fbuffer, trace_file, - sizeof(*entry)); - - if (!entry) - return; - - entry->prev_prio = prev->prio; - entry->next_prio = next->prio; - entry->next_pid = next->pid; - - trace_event_buffer_commit(&fbuffer); -} - -static struct trace_event_class sched_switch_class = { - .system = THIS_SYSTEM, - .reg = trace_event_reg, - .fields_array = sched_switch_fields, - .fields = LIST_HEAD_INIT(sched_switch_class.fields), - .probe = sched_switch_probe, -}; - -static void sched_waking_probe(void *data, struct task_struct *t) -{ - struct trace_event_file *trace_file = data; - struct trace_event_buffer fbuffer; - struct wake_event *entry; - - if (trace_trigger_soft_disabled(trace_file)) - return; - - entry = trace_event_buffer_reserve(&fbuffer, trace_file, - sizeof(*entry)); - - if (!entry) - return; - - entry->prio = t->prio; - entry->pid = t->pid; - - trace_event_buffer_commit(&fbuffer); -} - -static struct trace_event_class sched_waking_class = { - .system = THIS_SYSTEM, - .reg = trace_event_reg, - .fields_array = sched_waking_fields, - .fields = LIST_HEAD_INIT(sched_waking_class.fields), - .probe = sched_waking_probe, -}; - -static enum print_line_t sched_switch_output(struct trace_iterator *iter, int flags, - struct trace_event *trace_event) -{ - struct trace_seq *s = &iter->seq; - struct sched_event *REC = (struct sched_event *)iter->ent; - int ret; - - ret = trace_raw_output_prep(iter, trace_event); - if (ret != TRACE_TYPE_HANDLED) - return ret; - - trace_seq_printf(s, SCHED_PRINT_FMT); - trace_seq_putc(s, '\n'); - return trace_handle_return(s); -} - -static struct trace_event_functions sched_switch_funcs = { - .trace = sched_switch_output, -}; - -static enum print_line_t sched_waking_output(struct trace_iterator *iter, int flags, - struct trace_event *trace_event) -{ - struct trace_seq *s = &iter->seq; - struct wake_event *REC = (struct wake_event *)iter->ent; - int ret; - - ret = trace_raw_output_prep(iter, trace_event); - if (ret != TRACE_TYPE_HANDLED) - return ret; - - trace_seq_printf(s, SCHED_WAKING_FMT); - trace_seq_putc(s, '\n'); - - return trace_handle_return(s); -} - -static struct trace_event_functions sched_waking_funcs = { - .trace = sched_waking_output, -}; - -#undef C -#define C(a, b...) #a "," __stringify(b) +/* + * Must include the event header that the custom event will attach to, + * from the C file, and not in the custom header file. + */ +#include <trace/events/sched.h> -static struct trace_event_call sched_switch_call = { - .class = &sched_switch_class, - .event = { - .funcs = &sched_switch_funcs, - }, - .print_fmt = SCHED_PRINT_FMT, - .module = THIS_MODULE, - .flags = TRACE_EVENT_FL_TRACEPOINT, -}; +/* Declare CREATE_CUSTOM_TRACE_EVENTS before including custom header */ +#define CREATE_CUSTOM_TRACE_EVENTS -static struct trace_event_call sched_waking_call = { - .class = &sched_waking_class, - .event = { - .funcs = &sched_waking_funcs, - }, - .print_fmt = SCHED_WAKING_FMT, - .module = THIS_MODULE, - .flags = TRACE_EVENT_FL_TRACEPOINT, -}; +#include "trace_custom_sched.h" +/* + * As the trace events are not exported to modules, the use of + * for_each_kernel_tracepoint() is needed to find the trace event + * to attach to. The fct() function below is a callback that + * will be called for every event. + * + * Helper functions are created by the TRACE_CUSTOM_EVENT() macro + * to update the event. Those are of the form: + * + * trace_custom_event_<event>_update() + * + * Where <event> is the event to attach.
+ */ static void fct(struct tracepoint *tp, void *priv) { - if (tp->name && strcmp(tp->name, "sched_switch") == 0) - sched_switch_call.tp = tp; - else if (tp->name && strcmp(tp->name, "sched_waking") == 0) - sched_waking_call.tp = tp; -} - -static int add_event(struct trace_event_call *call) -{ - int ret; - - ret = register_trace_event(&call->event); - if (WARN_ON(!ret)) - return -ENODEV; - - ret = trace_add_event_call(call); - if (WARN_ON(ret)) - unregister_trace_event(&call->event); - - return ret; + trace_custom_event_sched_switch_update(tp); + trace_custom_event_sched_waking_update(tp); } static int __init trace_sched_init(void) { - int ret; - - check_trace_callback_type_sched_switch(sched_switch_probe); - check_trace_callback_type_sched_waking(sched_waking_probe); - for_each_kernel_tracepoint(fct, NULL); - - ret = add_event(&sched_switch_call); - if (ret) - return ret; - - ret = add_event(&sched_waking_call); - if (ret) - trace_remove_event_call(&sched_switch_call); - - return ret; + return 0; } static void __exit trace_sched_exit(void) { - trace_set_clr_event(THIS_SYSTEM, "sched_switch", 0); - trace_set_clr_event(THIS_SYSTEM, "sched_waking", 0); - - trace_remove_event_call(&sched_switch_call); - trace_remove_event_call(&sched_waking_call); } module_init(trace_sched_init); diff --git a/samples/trace_events/trace_custom_sched.h b/samples/trace_events/trace_custom_sched.h new file mode 100644 index 000000000000..a3d14de6a2e5 --- /dev/null +++ b/samples/trace_events/trace_custom_sched.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Like the headers that use TRACE_EVENT(), the TRACE_CUSTOM_EVENT() + * needs a header that allows for multiple inclusions. + * + * Test for a unique name (here we have _TRACE_CUSTOM_SCHED_H), + * also allowing it to continue if TRACE_CUSTOM_MULTI_READ is defined. + */ +#if !defined(_TRACE_CUSTOM_SCHED_H) || defined(TRACE_CUSTOM_MULTI_READ) +#define _TRACE_CUSTOM_SCHED_H + +/* Include linux/trace_events.h for initial defines of TRACE_CUSTOM_EVENT() */ +#include <linux/trace_events.h> + +/* + * TRACE_CUSTOM_EVENT() is just like TRACE_EVENT(). The first parameter + * is the event name of an existing event where the TRACE_EVENT has been included + * in the C file before including this file. + */ +TRACE_CUSTOM_EVENT(sched_switch, + + /* + * The TP_PROTO() and TP_ARGS must match the trace event + * that the custom event is using. + */ + TP_PROTO(bool preempt, + struct task_struct *prev, + struct task_struct *next), + + TP_ARGS(preempt, prev, next), + + /* + * The next fields are where the customization happens. + * The TP_STRUCT__entry() defines what will be recorded + * in the ring buffer when the custom event triggers. + * + * The rest is just like the TRACE_EVENT() macro except that + * it uses the custom entry.
+ */ + TP_STRUCT__entry( + __field( unsigned short, prev_prio ) + __field( unsigned short, next_prio ) + __field( pid_t, next_pid ) + ), + + TP_fast_assign( + __entry->prev_prio = prev->prio; + __entry->next_pid = next->pid; + __entry->next_prio = next->prio; + ), + + TP_printk("prev_prio=%d next_pid=%d next_prio=%d", + __entry->prev_prio, __entry->next_pid, __entry->next_prio) +) + + +TRACE_CUSTOM_EVENT(sched_waking, + + TP_PROTO(struct task_struct *p), + + TP_ARGS(p), + + TP_STRUCT__entry( + __field( pid_t, pid ) + __field( unsigned short, prio ) + ), + + TP_fast_assign( + __entry->pid = p->pid; + __entry->prio = p->prio; + ), + + TP_printk("pid=%d prio=%d", __entry->pid, __entry->prio) +) +#endif +/* + * Just like the headers that create TRACE_EVENTs, the below must + * be outside the protection of the above #if block. + */ + +/* + * It is required that the Makefile includes: + * CFLAGS_<file>.o := -I$(src) + */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . + +/* + * It is required that the TRACE_INCLUDE_FILE be the same + * as this file without the ".h". + */ +#define TRACE_INCLUDE_FILE trace_custom_sched +#include <trace/define_custom_trace.h> -- cgit v1.2.3-71-gd317 From 380af29b8d7670c445965bd573ab219aff0c4c11 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Thu, 10 Mar 2022 21:37:09 -0500 Subject: tracing: Add snapshot at end of kernel boot up Add ftrace_boot_snapshot kernel parameter that will take a snapshot at the end of boot up just before switching over to user space (it happens during the kernel freeing of init memory). This is useful when there's interesting data that can be collected from kernel start up, but gets overwritten by user space start up code. With this option, the ring buffer content from the boot up traces gets saved in the snapshot at the end of boot up. This trace can be read from: /sys/kernel/tracing/snapshot Signed-off-by: Steven Rostedt (Google) --- Documentation/admin-guide/kernel-parameters.txt | 8 ++++++++ include/linux/ftrace.h | 11 ++++++++++- kernel/trace/ftrace.c | 2 ++ kernel/trace/trace.c | 18 ++++++++++++++++++ 4 files changed, 38 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index f5a27f067db9..f6b7ee64ace8 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1435,6 +1435,14 @@ as early as possible in order to facilitate early boot debugging. + ftrace_boot_snapshot + [FTRACE] On boot up, a snapshot will be taken of the + ftrace ring buffer that can be read at: + /sys/kernel/tracing/snapshot. + This is useful if you need tracing information from kernel + boot up that is likely to be overwritten by user space + start up functionality. + ftrace_dump_on_oops[=orig_cpu] [FTRACE] will dump the trace buffers on oops.
If no parameter is passed, ftrace will dump diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9999e29187de..37b619185ec9 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -30,6 +30,12 @@ #define ARCH_SUPPORTS_FTRACE_OPS 0 #endif +#ifdef CONFIG_TRACING +extern void ftrace_boot_snapshot(void); +#else +static inline void ftrace_boot_snapshot(void) { } +#endif + #ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops; struct ftrace_regs; @@ -215,7 +221,10 @@ struct ftrace_ops_hash { void ftrace_free_init_mem(void); void ftrace_free_mem(struct module *mod, void *start, void *end); #else -static inline void ftrace_free_init_mem(void) { } +static inline void ftrace_free_init_mem(void) +{ + ftrace_boot_snapshot(); +} static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } #endif diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f9feb197b2da..4e29bd1cf151 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -7096,6 +7096,8 @@ void __init ftrace_free_init_mem(void) void *start = (void *)(&__init_begin); void *end = (void *)(&__init_end); + ftrace_boot_snapshot(); + ftrace_free_mem(NULL, start, end); } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7c85ce9ffdc3..eaf7d30ca6f1 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -185,6 +185,7 @@ static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; static bool allocate_snapshot; +static bool snapshot_at_boot; static int __init set_cmdline_ftrace(char *str) { @@ -230,6 +231,15 @@ static int __init boot_alloc_snapshot(char *str) __setup("alloc_snapshot", boot_alloc_snapshot); +static int __init boot_snapshot(char *str) +{ + snapshot_at_boot = true; + boot_alloc_snapshot(str); + return 1; +} +__setup("ftrace_boot_snapshot", boot_snapshot); + + static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static int __init set_trace_boot_options(char *str) @@ -10149,6 +10159,14 @@ out: return ret; } +void __init ftrace_boot_snapshot(void) +{ + if (snapshot_at_boot) { + tracing_snapshot(); + internal_trace_puts("** Boot snapshot taken **\n"); + } +} + void __init early_trace_init(void) { if (tracepoint_printk) { -- cgit v1.2.3-71-gd317 From b65700d046a60d0a29f6289e13c62d0c93689f11 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Tue, 8 Mar 2022 18:25:15 +0100 Subject: remoteproc: move rproc_da_to_va declaration to remoteproc.h The rproc_da_to_va() API is an exported function, so move its declaration from the remoteproc local remoteproc_internal.h to the public remoteproc.h file. This will allow drivers outside of the remoteproc folder to be able to use this API. 
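For illustration only (not part of this change): with the declaration public, a hypothetical out-of-folder driver could translate a firmware device address into a kernel pointer along these lines. The example_read_rproc_mem() helper, and the idea that da/len arrive from an IPC message or resource table, are assumptions made for the sketch:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/remoteproc.h>

/*
 * Copy data out of remote-processor memory. rproc_da_to_va() returns a
 * CPU-accessible address for device address 'da', or NULL if the range
 * is not mapped; 'is_iomem' selects the safe copy routine.
 */
static int example_read_rproc_mem(struct rproc *rproc, u64 da,
				  void *dst, size_t len)
{
	bool is_iomem;
	void *va = rproc_da_to_va(rproc, da, len, &is_iomem);

	if (!va)
		return -EINVAL;

	if (is_iomem)
		memcpy_fromio(dst, (const void __iomem *)va, len);
	else
		memcpy(dst, va, len);

	return 0;
}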
Signed-off-by: Suman Anna Signed-off-by: Dave Gerlach [adjusted line numbers to apply] Signed-off-by: Drew Fustini Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220308172515.29556-1-dfustini@baylibre.com --- drivers/remoteproc/remoteproc_internal.h | 1 - include/linux/remoteproc.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index a328e634b1de..72d4d3d7d94d 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -84,7 +84,6 @@ static inline void rproc_char_device_remove(struct rproc *rproc) void rproc_free_vring(struct rproc_vring *rvring); int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); -void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); phys_addr_t rproc_va_to_pa(void *cpu_addr); int rproc_trigger_recovery(struct rproc *rproc); diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 93a1d0050fbc..b2ee325e0af1 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -675,6 +675,7 @@ void rproc_shutdown(struct rproc *rproc); int rproc_detach(struct rproc *rproc); int rproc_set_firmware(struct rproc *rproc, const char *fw_name); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); +void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); void rproc_coredump_using_sections(struct rproc *rproc); int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size); int rproc_coredump_add_custom_segment(struct rproc *rproc, -- cgit v1.2.3-71-gd317 From c993ee0f9f81caf5767a50d1faeba39a0dc82af2 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 11 Mar 2022 13:23:31 +0000 Subject: watch_queue: Fix filter limit check In watch_queue_set_filter(), there are a couple of places where we check that the filter type value does not exceed what the type_filter bitmap can hold. One place calculates the number of bits by: if (tf[i].type >= sizeof(wfilter->type_filter) * 8) which is fine, but the second does: if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG) which is not. This can lead to a couple of out-of-bounds writes due to a too-large type: (1) __set_bit() on wfilter->type_filter (2) Writing more elements in wfilter->filters[] than we allocated. Fix this by just using the proper WATCH_TYPE__NR instead, which is the number of types we actually know about. The bug may cause an oops looking something like: BUG: KASAN: slab-out-of-bounds in watch_queue_set_filter+0x659/0x740 Write of size 4 at addr ffff88800d2c66bc by task watch_queue_oob/611 ... Call Trace: dump_stack_lvl+0x45/0x59 print_address_description.constprop.0+0x1f/0x150 ... kasan_report.cold+0x7f/0x11b ... watch_queue_set_filter+0x659/0x740 ... 
__x64_sys_ioctl+0x127/0x190 do_syscall_64+0x43/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xae Allocated by task 611: kasan_save_stack+0x1e/0x40 __kasan_kmalloc+0x81/0xa0 watch_queue_set_filter+0x23a/0x740 __x64_sys_ioctl+0x127/0x190 do_syscall_64+0x43/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xae The buggy address belongs to the object at ffff88800d2c66a0 which belongs to the cache kmalloc-32 of size 32 The buggy address is located 28 bytes inside of 32-byte region [ffff88800d2c66a0, ffff88800d2c66c0) Fixes: c73be61cede5 ("pipe: Add general notification queue support") Reported-by: Jann Horn Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- include/linux/watch_queue.h | 3 ++- kernel/watch_queue.c | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h index c994d1b2cdba..3b9a40ae8bdb 100644 --- a/include/linux/watch_queue.h +++ b/include/linux/watch_queue.h @@ -28,7 +28,8 @@ struct watch_type_filter { struct watch_filter { union { struct rcu_head rcu; - unsigned long type_filter[2]; /* Bitmask of accepted types */ + /* Bitmask of accepted types */ + DECLARE_BITMAP(type_filter, WATCH_TYPE__NR); }; u32 nr_filters; /* Number of filters */ struct watch_type_filter filters[]; diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index 9c9eb20dd2c5..427b0318e303 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -320,7 +320,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe, tf[i].info_mask & WATCH_INFO_LENGTH) goto err_filter; /* Ignore any unknown types */ - if (tf[i].type >= sizeof(wfilter->type_filter) * 8) + if (tf[i].type >= WATCH_TYPE__NR) continue; nr_filter++; } @@ -336,7 +336,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe, q = wfilter->filters; for (i = 0; i < filter.nr_filters; i++) { - if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG) + if (tf[i].type >= WATCH_TYPE__NR) continue; q->type = tf[i].type; -- cgit v1.2.3-71-gd317 From c13b780c4597e1e6cee3154053a196aa329b1367 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Sun, 13 Feb 2022 14:12:42 -0600 Subject: remoteproc: Change rproc_shutdown() to return a status The rproc_shutdown() function is currently not returning any error code, and any failures within rproc_stop() are not passed back to the users. Change the signature to return a success value back to the callers. The remoteproc sysfs and cdev interfaces are also updated to return back this status to userspace. Signed-off-by: Suman Anna Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220213201246.25952-2-s-anna@ti.com --- Documentation/staging/remoteproc.rst | 3 ++- drivers/remoteproc/remoteproc_cdev.c | 2 +- drivers/remoteproc/remoteproc_core.c | 9 ++++++--- drivers/remoteproc/remoteproc_sysfs.c | 2 +- include/linux/remoteproc.h | 2 +- 5 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/Documentation/staging/remoteproc.rst b/Documentation/staging/remoteproc.rst index 9cccd3dd6a4b..348ee7e508ac 100644 --- a/Documentation/staging/remoteproc.rst +++ b/Documentation/staging/remoteproc.rst @@ -49,13 +49,14 @@ might also consider using dev_archdata for this). :: - void rproc_shutdown(struct rproc *rproc) + int rproc_shutdown(struct rproc *rproc) Power off a remote processor (previously booted with rproc_boot()). 
In case @rproc is still being used by an additional user(s), then this function will just decrement the power refcount and exit, without really powering off the device. +Returns 0 on success, and an appropriate error value otherwise. Every call to rproc_boot() must (eventually) be accompanied by a call to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug. diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c index 4ad98b0b8caa..906ff3c4dfdd 100644 --- a/drivers/remoteproc/remoteproc_cdev.c +++ b/drivers/remoteproc/remoteproc_cdev.c @@ -42,7 +42,7 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_ rproc->state != RPROC_ATTACHED) return -EINVAL; - rproc_shutdown(rproc); + ret = rproc_shutdown(rproc); } else if (!strncmp(cmd, "detach", len)) { if (rproc->state != RPROC_ATTACHED) return -EINVAL; diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 69f51acf235e..c510125769b9 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -2061,16 +2061,18 @@ EXPORT_SYMBOL(rproc_boot); * which means that the @rproc handle stays valid even after rproc_shutdown() * returns, and users can still use it with a subsequent rproc_boot(), if * needed. + * + * Return: 0 on success, and an appropriate error value otherwise */ -void rproc_shutdown(struct rproc *rproc) +int rproc_shutdown(struct rproc *rproc) { struct device *dev = &rproc->dev; - int ret; + int ret = 0; ret = mutex_lock_interruptible(&rproc->lock); if (ret) { dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); - return; + return ret; } /* if the remote proc is still needed, bail out */ @@ -2097,6 +2099,7 @@ void rproc_shutdown(struct rproc *rproc) rproc->table_ptr = NULL; out: mutex_unlock(&rproc->lock); + return ret; } EXPORT_SYMBOL(rproc_shutdown); diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index abf0cd05d5e1..51a04bc6ba7a 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -206,7 +206,7 @@ static ssize_t state_store(struct device *dev, rproc->state != RPROC_ATTACHED) return -EINVAL; - rproc_shutdown(rproc); + ret = rproc_shutdown(rproc); } else if (sysfs_streq(buf, "detach")) { if (rproc->state != RPROC_ATTACHED) return -EINVAL; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index b2ee325e0af1..7c943f0a2fc4 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -671,7 +671,7 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, u32 da, const char *name, ...); int rproc_boot(struct rproc *rproc); -void rproc_shutdown(struct rproc *rproc); +int rproc_shutdown(struct rproc *rproc); int rproc_detach(struct rproc *rproc); int rproc_set_firmware(struct rproc *rproc, const char *fw_name); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); -- cgit v1.2.3-71-gd317 From 84bd3690bf54a2f2f3b8449afa022aac1957ba17 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 9 Mar 2022 19:49:37 -0800 Subject: nvdimm/namespace: Delete nd_namespace_blk Now that none of the configuration paths consider BLK namespaces, delete the BLK namespace data and supporting code. 
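For orientation while reading the removals below: PMEM namespaces keep the container_of-style accessor that survives in the include/linux/nd.h hunk. A minimal sketch, assuming the caller already knows the device is a PMEM namespace (the example_ helper name is made up):

#include <linux/ioport.h>
#include <linux/nd.h>

/*
 * Size of a PMEM namespace, taken from the resource embedded in its
 * nd_namespace_io; this is the accessor left once the BLK variant is gone.
 */
static resource_size_t example_pmem_ns_size(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

	return resource_size(&nspm->nsio.res);
}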
Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/164688417727.2879318.11691110761800109662.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/label.c | 340 ---------------------------------------- drivers/nvdimm/label.h | 3 - drivers/nvdimm/namespace_devs.c | 227 ++------------------------- drivers/nvdimm/nd-core.h | 3 - drivers/nvdimm/nd.h | 1 - include/linux/nd.h | 26 --- 6 files changed, 13 insertions(+), 587 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 5ec9a4023df9..8c972bcb2ac3 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -968,326 +968,6 @@ static int __pmem_label_update(struct nd_region *nd_region, return rc; } -static bool is_old_resource(struct resource *res, struct resource **list, int n) -{ - int i; - - if (res->flags & DPA_RESOURCE_ADJUSTED) - return false; - for (i = 0; i < n; i++) - if (res == list[i]) - return true; - return false; -} - -static struct resource *to_resource(struct nvdimm_drvdata *ndd, - struct nd_namespace_label *nd_label) -{ - struct resource *res; - - for_each_dpa_resource(ndd, res) { - if (res->start != nsl_get_dpa(ndd, nd_label)) - continue; - if (resource_size(res) != nsl_get_rawsize(ndd, nd_label)) - continue; - return res; - } - - return NULL; -} - -/* - * Use the presence of the type_guid as a flag to determine isetcookie - * usage and nlabel + position policy for blk-aperture namespaces. - */ -static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd, - struct nd_namespace_label *nd_label, - u64 isetcookie) -{ - if (efi_namespace_label_has(ndd, type_guid)) { - nsl_set_isetcookie(ndd, nd_label, isetcookie); - return; - } - nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */ -} - -bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, - struct nd_namespace_label *nd_label, - u64 isetcookie) -{ - if (!efi_namespace_label_has(ndd, type_guid)) - return true; - - if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) { - dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie, - nsl_get_isetcookie(ndd, nd_label)); - return false; - } - - return true; -} - -static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd, - struct nd_namespace_label *nd_label, int nlabel, - bool first) -{ - if (!efi_namespace_label_has(ndd, type_guid)) { - nsl_set_nlabel(ndd, nd_label, 0); /* N/A */ - return; - } - nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff); -} - -static void nsl_set_blk_position(struct nvdimm_drvdata *ndd, - struct nd_namespace_label *nd_label, - bool first) -{ - if (!efi_namespace_label_has(ndd, type_guid)) { - nsl_set_position(ndd, nd_label, 0); - return; - } - nsl_set_position(ndd, nd_label, first ? 
0 : 0xffff); -} - -/* - * 1/ Account all the labels that can be freed after this update - * 2/ Allocate and write the label to the staging (next) index - * 3/ Record the resources in the namespace device - */ -static int __blk_label_update(struct nd_region *nd_region, - struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk, - int num_labels) -{ - int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO; - struct nd_interleave_set *nd_set = nd_region->nd_set; - struct nd_namespace_common *ndns = &nsblk->common; - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_namespace_label *nd_label; - struct nd_label_ent *label_ent, *e; - struct nd_namespace_index *nsindex; - unsigned long *free, *victim_map = NULL; - struct resource *res, **old_res_list; - struct nd_label_id label_id; - int min_dpa_idx = 0; - LIST_HEAD(list); - u32 nslot, slot; - - if (!preamble_next(ndd, &nsindex, &free, &nslot)) - return -ENXIO; - - old_res_list = nsblk->res; - nfree = nd_label_nfree(ndd); - old_num_resources = nsblk->num_resources; - nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); - - /* - * We need to loop over the old resources a few times, which seems a - * bit inefficient, but we need to know that we have the label - * space before we start mutating the tracking structures. - * Otherwise the recovery method of last resort for userspace is - * disable and re-enable the parent region. - */ - alloc = 0; - for_each_dpa_resource(ndd, res) { - if (strcmp(res->name, label_id.id) != 0) - continue; - if (!is_old_resource(res, old_res_list, old_num_resources)) - alloc++; - } - - victims = 0; - if (old_num_resources) { - /* convert old local-label-map to dimm-slot victim-map */ - victim_map = bitmap_zalloc(nslot, GFP_KERNEL); - if (!victim_map) - return -ENOMEM; - - /* mark unused labels for garbage collection */ - for_each_clear_bit_le(slot, free, nslot) { - nd_label = to_label(ndd, slot); - if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid)) - continue; - res = to_resource(ndd, nd_label); - if (res && is_old_resource(res, old_res_list, - old_num_resources)) - continue; - slot = to_slot(ndd, nd_label); - set_bit(slot, victim_map); - victims++; - } - } - - /* don't allow updates that consume the last label */ - if (nfree - alloc < 0 || nfree - alloc + victims < 1) { - dev_info(&nsblk->common.dev, "insufficient label space\n"); - bitmap_free(victim_map); - return -ENOSPC; - } - /* from here on we need to abort on error */ - - - /* assign all resources to the namespace before writing the labels */ - nsblk->res = NULL; - nsblk->num_resources = 0; - for_each_dpa_resource(ndd, res) { - if (strcmp(res->name, label_id.id) != 0) - continue; - if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) { - rc = -ENOMEM; - goto abort; - } - } - - /* release slots associated with any invalidated UUIDs */ - mutex_lock(&nd_mapping->lock); - list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) - if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) { - reap_victim(nd_mapping, label_ent); - list_move(&label_ent->list, &list); - } - mutex_unlock(&nd_mapping->lock); - - /* - * Find the resource associated with the first label in the set - * per the v1.2 namespace specification. 
- */ - for (i = 0; i < nsblk->num_resources; i++) { - struct resource *min = nsblk->res[min_dpa_idx]; - - res = nsblk->res[i]; - if (res->start < min->start) - min_dpa_idx = i; - } - - for (i = 0; i < nsblk->num_resources; i++) { - size_t offset; - - res = nsblk->res[i]; - if (is_old_resource(res, old_res_list, old_num_resources)) - continue; /* carry-over */ - slot = nd_label_alloc_slot(ndd); - if (slot == UINT_MAX) { - rc = -ENXIO; - goto abort; - } - dev_dbg(ndd->dev, "allocated: %d\n", slot); - - nd_label = to_label(ndd, slot); - memset(nd_label, 0, sizeof_namespace_label(ndd)); - nsl_set_uuid(ndd, nd_label, nsblk->uuid); - nsl_set_name(ndd, nd_label, nsblk->alt_name); - nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL); - - nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources, - i == min_dpa_idx); - nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx); - nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2); - - nsl_set_dpa(ndd, nd_label, res->start); - nsl_set_rawsize(ndd, nd_label, resource_size(res)); - nsl_set_lbasize(ndd, nd_label, nsblk->lbasize); - nsl_set_slot(ndd, nd_label, slot); - nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid); - nsl_set_claim_class(ndd, nd_label, ndns->claim_class); - nsl_calculate_checksum(ndd, nd_label); - - /* update label */ - offset = nd_label_offset(ndd, nd_label); - rc = nvdimm_set_config_data(ndd, offset, nd_label, - sizeof_namespace_label(ndd)); - if (rc < 0) - goto abort; - } - - /* free up now unused slots in the new index */ - for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) { - dev_dbg(ndd->dev, "free: %d\n", slot); - nd_label_free_slot(ndd, slot); - } - - /* update index */ - rc = nd_label_write_index(ndd, ndd->ns_next, - nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0); - if (rc) - goto abort; - - /* - * Now that the on-dimm labels are up to date, fix up the tracking - * entries in nd_mapping->labels - */ - nlabel = 0; - mutex_lock(&nd_mapping->lock); - list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { - nd_label = label_ent->label; - if (!nd_label) - continue; - nlabel++; - if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid)) - continue; - nlabel--; - list_move(&label_ent->list, &list); - label_ent->label = NULL; - } - list_splice_tail_init(&list, &nd_mapping->labels); - mutex_unlock(&nd_mapping->lock); - - if (nlabel + nsblk->num_resources > num_labels) { - /* - * Bug, we can't end up with more resources than - * available labels - */ - WARN_ON_ONCE(1); - rc = -ENXIO; - goto out; - } - - mutex_lock(&nd_mapping->lock); - label_ent = list_first_entry_or_null(&nd_mapping->labels, - typeof(*label_ent), list); - if (!label_ent) { - WARN_ON(1); - mutex_unlock(&nd_mapping->lock); - rc = -ENXIO; - goto out; - } - for_each_clear_bit_le(slot, free, nslot) { - nd_label = to_label(ndd, slot); - if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid)) - continue; - res = to_resource(ndd, nd_label); - res->flags &= ~DPA_RESOURCE_ADJUSTED; - dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot); - list_for_each_entry_from(label_ent, &nd_mapping->labels, list) { - if (label_ent->label) - continue; - label_ent->label = nd_label; - nd_label = NULL; - break; - } - if (nd_label) - dev_WARN(&nsblk->common.dev, - "failed to track label slot%d\n", slot); - } - mutex_unlock(&nd_mapping->lock); - - out: - kfree(old_res_list); - bitmap_free(victim_map); - return rc; - - abort: - /* - * 1/ repair the allocated label bitmap in the index - * 2/ restore the resource list - */ - nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd)); - 
kfree(nsblk->res); - nsblk->res = old_res_list; - nsblk->num_resources = old_num_resources; - old_res_list = NULL; - goto out; -} - static int init_labels(struct nd_mapping *nd_mapping, int num_labels) { int i, old_num_labels = 0; @@ -1425,26 +1105,6 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region, return 0; } -int nd_blk_namespace_label_update(struct nd_region *nd_region, - struct nd_namespace_blk *nsblk, resource_size_t size) -{ - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct resource *res; - int count = 0; - - if (size == 0) - return del_labels(nd_mapping, nsblk->uuid); - - for_each_dpa_resource(to_ndd(nd_mapping), res) - count++; - - count = init_labels(nd_mapping, count); - if (count < 0) - return count; - - return __blk_label_update(nd_region, nd_mapping, nsblk, count); -} - int __init nd_label_init(void) { WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid)); diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h index 8ee248fc214f..198ef1df298b 100644 --- a/drivers/nvdimm/label.h +++ b/drivers/nvdimm/label.h @@ -221,9 +221,6 @@ bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot); u32 nd_label_nfree(struct nvdimm_drvdata *ndd); struct nd_region; struct nd_namespace_pmem; -struct nd_namespace_blk; int nd_pmem_namespace_label_update(struct nd_region *nd_region, struct nd_namespace_pmem *nspm, resource_size_t size); -int nd_blk_namespace_label_update(struct nd_region *nd_region, - struct nd_namespace_blk *nsblk, resource_size_t size); #endif /* __LABEL_H__ */ diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 5c76547c9b84..d1c190b02657 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -32,19 +32,6 @@ static void namespace_pmem_release(struct device *dev) kfree(nspm); } -static void namespace_blk_release(struct device *dev) -{ - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - struct nd_region *nd_region = to_nd_region(dev->parent); - - if (nsblk->id >= 0) - ida_simple_remove(&nd_region->ns_ida, nsblk->id); - kfree(nsblk->alt_name); - kfree(nsblk->uuid); - kfree(nsblk->res); - kfree(nsblk); -} - static bool is_namespace_pmem(const struct device *dev); static bool is_namespace_io(const struct device *dev); @@ -245,65 +232,6 @@ out: return rc; } -static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk) -{ - struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent); - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_label_id label_id; - struct resource *res; - int count, i; - - if (!nsblk->uuid || !nsblk->lbasize || !ndd) - return false; - - count = 0; - nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); - for_each_dpa_resource(ndd, res) { - if (strcmp(res->name, label_id.id) != 0) - continue; - /* - * Resources with unacknowledged adjustments indicate a - * failure to update labels - */ - if (res->flags & DPA_RESOURCE_ADJUSTED) - return false; - count++; - } - - /* These values match after a successful label update */ - if (count != nsblk->num_resources) - return false; - - for (i = 0; i < nsblk->num_resources; i++) { - struct resource *found = NULL; - - for_each_dpa_resource(ndd, res) - if (res == nsblk->res[i]) { - found = res; - break; - } - /* stale resource */ - if (!found) - return false; - } - - return true; -} - -resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk) -{ - resource_size_t size; - - 
nvdimm_bus_lock(&nsblk->common.dev); - size = __nd_namespace_blk_validate(nsblk); - nvdimm_bus_unlock(&nsblk->common.dev); - - return size; -} -EXPORT_SYMBOL(nd_namespace_blk_validate); - - static int nd_namespace_label_update(struct nd_region *nd_region, struct device *dev) { @@ -1579,12 +1507,6 @@ static const struct device_type namespace_pmem_device_type = { .groups = nd_namespace_attribute_groups, }; -static const struct device_type namespace_blk_device_type = { - .name = "nd_namespace_blk", - .release = namespace_blk_release, - .groups = nd_namespace_attribute_groups, -}; - static bool is_namespace_pmem(const struct device *dev) { return dev ? dev->type == &namespace_pmem_device_type : false; @@ -1964,54 +1886,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, return ERR_PTR(rc); } -struct resource *nsblk_add_resource(struct nd_region *nd_region, - struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk, - resource_size_t start) -{ - struct nd_label_id label_id; - struct resource *res; - - nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); - res = krealloc(nsblk->res, - sizeof(void *) * (nsblk->num_resources + 1), - GFP_KERNEL); - if (!res) - return NULL; - nsblk->res = (struct resource **) res; - for_each_dpa_resource(ndd, res) - if (strcmp(res->name, label_id.id) == 0 - && res->start == start) { - nsblk->res[nsblk->num_resources++] = res; - return res; - } - return NULL; -} - -static struct device *nd_namespace_blk_create(struct nd_region *nd_region) -{ - struct nd_namespace_blk *nsblk; - struct device *dev; - - if (!is_nd_blk(&nd_region->dev)) - return NULL; - - nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); - if (!nsblk) - return NULL; - - dev = &nsblk->common.dev; - dev->type = &namespace_blk_device_type; - nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL); - if (nsblk->id < 0) { - kfree(nsblk); - return NULL; - } - dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id); - dev->parent = &nd_region->dev; - - return &nsblk->common.dev; -} - static struct device *nd_namespace_pmem_create(struct nd_region *nd_region) { struct nd_namespace_pmem *nspm; @@ -2050,10 +1924,7 @@ void nd_region_create_ns_seed(struct nd_region *nd_region) if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO) return; - if (is_nd_blk(&nd_region->dev)) - nd_region->ns_seed = nd_namespace_blk_create(nd_region); - else - nd_region->ns_seed = nd_namespace_pmem_create(nd_region); + nd_region->ns_seed = nd_namespace_pmem_create(nd_region); /* * Seed creation failures are not fatal, provisioning is simply @@ -2128,54 +1999,6 @@ static int add_namespace_resource(struct nd_region *nd_region, return i; } -static struct device *create_namespace_blk(struct nd_region *nd_region, - struct nd_namespace_label *nd_label, int count) -{ - - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nd_interleave_set *nd_set = nd_region->nd_set; - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_namespace_blk *nsblk; - char name[NSLABEL_NAME_LEN]; - struct device *dev = NULL; - struct resource *res; - uuid_t uuid; - - if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid)) - return ERR_PTR(-EAGAIN); - if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2)) - return ERR_PTR(-EAGAIN); - - nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); - if (!nsblk) - return ERR_PTR(-ENOMEM); - dev = &nsblk->common.dev; - dev->type = &namespace_blk_device_type; - dev->parent = &nd_region->dev; - nsblk->id = -1; - nsblk->lbasize = 
nsl_get_lbasize(ndd, nd_label); - nsl_get_uuid(ndd, nd_label, &uuid); - nsblk->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL); - nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label); - if (!nsblk->uuid) - goto blk_err; - nsl_get_name(ndd, nd_label, name); - if (name[0]) { - nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL); - if (!nsblk->alt_name) - goto blk_err; - } - res = nsblk_add_resource(nd_region, ndd, nsblk, - nsl_get_dpa(ndd, nd_label)); - if (!res) - goto blk_err; - nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count); - return dev; - blk_err: - namespace_blk_release(dev); - return ERR_PTR(-ENXIO); -} - static int cmp_dpa(const void *a, const void *b) { const struct device *dev_a = *(const struct device **) a; @@ -2233,12 +2056,7 @@ static struct device **scan_labels(struct nd_region *nd_region) kfree(devs); devs = __devs; - if (is_nd_blk(&nd_region->dev)) - dev = create_namespace_blk(nd_region, nd_label, count); - else - dev = create_namespace_pmem(nd_region, nd_mapping, - nd_label); - + dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); if (IS_ERR(dev)) { switch (PTR_ERR(dev)) { case -EAGAIN: @@ -2260,30 +2078,21 @@ static struct device **scan_labels(struct nd_region *nd_region) ? "blk" : "pmem", count == 1 ? "" : "s"); if (count == 0) { + struct nd_namespace_pmem *nspm; + /* Publish a zero-sized namespace for userspace to configure. */ nd_mapping_free_labels(nd_mapping); devs = kcalloc(2, sizeof(dev), GFP_KERNEL); if (!devs) goto err; - if (is_nd_blk(&nd_region->dev)) { - struct nd_namespace_blk *nsblk; - - nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); - if (!nsblk) - goto err; - dev = &nsblk->common.dev; - dev->type = &namespace_blk_device_type; - } else { - struct nd_namespace_pmem *nspm; - nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); - if (!nspm) - goto err; - dev = &nspm->nsio.common.dev; - dev->type = &namespace_pmem_device_type; - nd_namespace_pmem_set_resource(nd_region, nspm, 0); - } + nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); + if (!nspm) + goto err; + dev = &nspm->nsio.common.dev; + dev->type = &namespace_pmem_device_type; + nd_namespace_pmem_set_resource(nd_region, nspm, 0); dev->parent = &nd_region->dev; devs[count++] = dev; } else if (is_memory(&nd_region->dev)) { @@ -2318,10 +2127,7 @@ static struct device **scan_labels(struct nd_region *nd_region) err: if (devs) { for (i = 0; devs[i]; i++) - if (is_nd_blk(&nd_region->dev)) - namespace_blk_release(devs[i]); - else - namespace_pmem_release(devs[i]); + namespace_pmem_release(devs[i]); kfree(devs); } return NULL; @@ -2484,19 +2290,12 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) struct device *dev = devs[i]; int id; - if (type == ND_DEVICE_NAMESPACE_BLK) { - struct nd_namespace_blk *nsblk; - - nsblk = to_nd_namespace_blk(dev); - id = ida_simple_get(&nd_region->ns_ida, 0, 0, - GFP_KERNEL); - nsblk->id = id; - } else if (type == ND_DEVICE_NAMESPACE_PMEM) { + if (type == ND_DEVICE_NAMESPACE_PMEM) { struct nd_namespace_pmem *nspm; nspm = to_nd_namespace_pmem(dev); id = ida_simple_get(&nd_region->ns_ida, 0, 0, - GFP_KERNEL); + GFP_KERNEL); nspm->id = id; } else id = i; diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index a11850dd475d..e4af0719cf33 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -150,9 +150,6 @@ int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, struct nd_label_id *label_id); int alias_dpa_busy(struct device 
*dev, void *data); -struct resource *nsblk_add_resource(struct nd_region *nd_region, - struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk, - resource_size_t start); int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd); void get_ndd(struct nvdimm_drvdata *ndd); resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 6f8ce114032d..8391bf2729bc 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -687,7 +687,6 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, return false; } -resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk); const uuid_t *nd_dev_to_uuid(struct device *dev); bool pmem_should_map_pages(struct device *dev); #endif /* __ND_H__ */ diff --git a/include/linux/nd.h b/include/linux/nd.h index 4813c7089e5c..7b2ccbdc1cbc 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -136,27 +136,6 @@ struct nd_namespace_pmem { int id; }; -/** - * struct nd_namespace_blk - namespace for dimm-bounded persistent memory - * @alt_name: namespace name supplied in the dimm label - * @uuid: namespace name supplied in the dimm label - * @id: ida allocated id - * @lbasize: blk namespaces have a native sector size when btt not present - * @size: sum of all the resource ranges allocated to this namespace - * @num_resources: number of dpa extents to claim - * @res: discontiguous dpa extents for given dimm - */ -struct nd_namespace_blk { - struct nd_namespace_common common; - char *alt_name; - uuid_t *uuid; - int id; - unsigned long lbasize; - resource_size_t size; - int num_resources; - struct resource **res; -}; - static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev) { return container_of(dev, struct nd_namespace_io, common.dev); @@ -169,11 +148,6 @@ static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device return container_of(nsio, struct nd_namespace_pmem, nsio); } -static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev) -{ - return container_of(dev, struct nd_namespace_blk, common.dev); -} - /** * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace * @ndns: device to read -- cgit v1.2.3-71-gd317 From 3b6c6c039707f6bb7c64af2aa82a437fabb93aee Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 9 Mar 2022 19:49:48 -0800 Subject: nvdimm/region: Delete nd_blk_region infrastructure Now that the nd_namespace_blk infrastructure is removed, delete all the region machinery to coordinate provisioning aliased capacity between PMEM and BLK. 
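A sketch of what bus providers are left with (compare the nfit hunk below, which drops struct nd_blk_region_desc for a plain struct nd_region_desc). The example_register_pmem() function and the single-mapping layout are assumptions, not code from this patch:

#include <linux/ioport.h>
#include <linux/libnvdimm.h>

/*
 * Register a single-mapping PMEM region; no BLK descriptor wrapper is
 * needed any more, only the generic nd_region_desc.
 */
static struct nd_region *example_register_pmem(struct nvdimm_bus *bus,
					       struct nvdimm *nvdimm,
					       struct resource *res)
{
	struct nd_mapping_desc mapping = {
		.nvdimm = nvdimm,
		.start = res->start,
		.size = resource_size(res),
	};
	struct nd_region_desc ndr_desc = {
		.res = res,
		.mapping = &mapping,
		.num_mappings = 1,
	};

	return nvdimm_pmem_region_create(bus, &ndr_desc);
}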
Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/164688418803.2879318.1302315202397235855.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/acpi/nfit/core.c | 11 +- drivers/nvdimm/bus.c | 2 - drivers/nvdimm/dimm_devs.c | 204 +++---------------------------------- drivers/nvdimm/label.c | 6 +- drivers/nvdimm/label.h | 2 +- drivers/nvdimm/namespace_devs.c | 127 +++-------------------- drivers/nvdimm/nd-core.h | 24 +---- drivers/nvdimm/nd.h | 12 --- drivers/nvdimm/region.c | 31 ++---- drivers/nvdimm/region_devs.c | 158 ++++------------------------ include/linux/libnvdimm.h | 24 ----- include/uapi/linux/ndctl.h | 2 - tools/testing/nvdimm/test/ndtest.c | 67 +----------- 13 files changed, 66 insertions(+), 604 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index bea6a219fddd..fe61f617a943 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2036,10 +2036,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; } - /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */ - if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) - set_bit(NDD_NOBLK, &flags); - if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); @@ -2602,8 +2598,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, { static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; struct acpi_nfit_system_address *spa = nfit_spa->spa; - struct nd_blk_region_desc ndbr_desc; - struct nd_region_desc *ndr_desc; + struct nd_region_desc *ndr_desc, _ndr_desc; struct nfit_memdev *nfit_memdev; struct nvdimm_bus *nvdimm_bus; struct resource res; @@ -2619,10 +2614,10 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, memset(&res, 0, sizeof(res)); memset(&mappings, 0, sizeof(mappings)); - memset(&ndbr_desc, 0, sizeof(ndbr_desc)); + memset(&_ndr_desc, 0, sizeof(_ndr_desc)); res.start = spa->address; res.end = res.start + spa->length - 1; - ndr_desc = &ndbr_desc.ndr_desc; + ndr_desc = &_ndr_desc; ndr_desc->res = &res; ndr_desc->provider_data = nfit_spa; ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 9dc7f3edd42b..a4b5f637e599 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -35,8 +35,6 @@ static int to_nd_device_type(struct device *dev) return ND_DEVICE_DIMM; else if (is_memory(dev)) return ND_DEVICE_REGION_PMEM; - else if (is_nd_blk(dev)) - return ND_DEVICE_REGION_BLK; else if (is_nd_dax(dev)) return ND_DEVICE_DAX_PMEM; else if (is_nd_region(dev->parent)) diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index dc7449a40003..ee507eed42b5 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -18,10 +18,6 @@ static DEFINE_IDA(dimm_ida); -static bool noblk; -module_param(noblk, bool, 0444); -MODULE_PARM_DESC(noblk, "force disable BLK / local alias support"); - /* * Retrieve bus and dimm handle and return if this bus supports * get_config_data commands @@ -211,22 +207,6 @@ struct nvdimm *to_nvdimm(struct device *dev) } EXPORT_SYMBOL_GPL(to_nvdimm); -struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr) -{ - struct nd_region *nd_region = &ndbr->nd_region; - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - - return nd_mapping->nvdimm; -} -EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm); - -unsigned long 
nd_blk_memremap_flags(struct nd_blk_region *ndbr) -{ - /* pmem mapping properties are private to libnvdimm */ - return ARCH_MEMREMAP_PMEM; -} -EXPORT_SYMBOL_GPL(nd_blk_memremap_flags); - struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping) { struct nvdimm *nvdimm = nd_mapping->nvdimm; @@ -312,8 +292,7 @@ static ssize_t flags_show(struct device *dev, { struct nvdimm *nvdimm = to_nvdimm(dev); - return sprintf(buf, "%s%s%s\n", - test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "", + return sprintf(buf, "%s%s\n", test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "", test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : ""); } @@ -612,8 +591,6 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, nvdimm->dimm_id = dimm_id; nvdimm->provider_data = provider_data; - if (noblk) - flags |= 1 << NDD_NOBLK; nvdimm->flags = flags; nvdimm->cmd_mask = cmd_mask; nvdimm->num_flush = num_flush; @@ -726,133 +703,6 @@ static unsigned long dpa_align(struct nd_region *nd_region) return nd_region->align / nd_region->ndr_mappings; } -int alias_dpa_busy(struct device *dev, void *data) -{ - resource_size_t map_end, blk_start, new; - struct blk_alloc_info *info = data; - struct nd_mapping *nd_mapping; - struct nd_region *nd_region; - struct nvdimm_drvdata *ndd; - struct resource *res; - unsigned long align; - int i; - - if (!is_memory(dev)) - return 0; - - nd_region = to_nd_region(dev); - for (i = 0; i < nd_region->ndr_mappings; i++) { - nd_mapping = &nd_region->mapping[i]; - if (nd_mapping->nvdimm == info->nd_mapping->nvdimm) - break; - } - - if (i >= nd_region->ndr_mappings) - return 0; - - ndd = to_ndd(nd_mapping); - map_end = nd_mapping->start + nd_mapping->size - 1; - blk_start = nd_mapping->start; - - /* - * In the allocation case ->res is set to free space that we are - * looking to validate against PMEM aliasing collision rules - * (i.e. BLK is allocated after all aliased PMEM). - */ - if (info->res) { - if (info->res->start >= nd_mapping->start - && info->res->start < map_end) - /* pass */; - else - return 0; - } - - retry: - /* - * Find the free dpa from the end of the last pmem allocation to - * the end of the interleave-set mapping. - */ - align = dpa_align(nd_region); - if (!align) - return 0; - - for_each_dpa_resource(ndd, res) { - resource_size_t start, end; - - if (strncmp(res->name, "pmem", 4) != 0) - continue; - - start = ALIGN_DOWN(res->start, align); - end = ALIGN(res->end + 1, align) - 1; - if ((start >= blk_start && start < map_end) - || (end >= blk_start && end <= map_end)) { - new = max(blk_start, min(map_end, end) + 1); - if (new != blk_start) { - blk_start = new; - goto retry; - } - } - } - - /* update the free space range with the probed blk_start */ - if (info->res && blk_start > info->res->start) { - info->res->start = max(info->res->start, blk_start); - if (info->res->start > info->res->end) - info->res->end = info->res->start - 1; - return 1; - } - - info->available -= blk_start - nd_mapping->start; - - return 0; -} - -/** - * nd_blk_available_dpa - account the unused dpa of BLK region - * @nd_mapping: container of dpa-resource-root + labels - * - * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but - * we arrange for them to never start at an lower dpa than the last - * PMEM allocation in an aliased region. 
- */ -resource_size_t nd_blk_available_dpa(struct nd_region *nd_region) -{ - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct blk_alloc_info info = { - .nd_mapping = nd_mapping, - .available = nd_mapping->size, - .res = NULL, - }; - struct resource *res; - unsigned long align; - - if (!ndd) - return 0; - - device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); - - /* now account for busy blk allocations in unaliased dpa */ - align = dpa_align(nd_region); - if (!align) - return 0; - for_each_dpa_resource(ndd, res) { - resource_size_t start, end, size; - - if (strncmp(res->name, "blk", 3) != 0) - continue; - start = ALIGN_DOWN(res->start, align); - end = ALIGN(res->end + 1, align) - 1; - size = end - start + 1; - if (size >= info.available) - return 0; - info.available -= size; - } - - return info.available; -} - /** * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max * contiguous unallocated dpa range. @@ -900,24 +750,16 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa * @nd_mapping: container of dpa-resource-root + labels * @nd_region: constrain available space check to this reference region - * @overlap: calculate available space assuming this level of overlap * * Validate that a PMEM label, if present, aligns with the start of an - * interleave set and truncate the available size at the lowest BLK - * overlap point. - * - * The expectation is that this routine is called multiple times as it - * probes for the largest BLK encroachment for any single member DIMM of - * the interleave set. Once that value is determined the PMEM-limit for - * the set can be established. + * interleave set. */ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, - struct nd_mapping *nd_mapping, resource_size_t *overlap) + struct nd_mapping *nd_mapping) { - resource_size_t map_start, map_end, busy = 0, available, blk_start; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + resource_size_t map_start, map_end, busy = 0; struct resource *res; - const char *reason; unsigned long align; if (!ndd) @@ -929,46 +771,28 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, map_start = nd_mapping->start; map_end = map_start + nd_mapping->size - 1; - blk_start = max(map_start, map_end + 1 - *overlap); for_each_dpa_resource(ndd, res) { resource_size_t start, end; start = ALIGN_DOWN(res->start, align); end = ALIGN(res->end + 1, align) - 1; if (start >= map_start && start < map_end) { - if (strncmp(res->name, "blk", 3) == 0) - blk_start = min(blk_start, - max(map_start, start)); - else if (end > map_end) { - reason = "misaligned to iset"; - goto err; - } else - busy += end - start + 1; + if (end > map_end) { + nd_dbg_dpa(nd_region, ndd, res, + "misaligned to iset\n"); + return 0; + } + busy += end - start + 1; } else if (end >= map_start && end <= map_end) { - if (strncmp(res->name, "blk", 3) == 0) { - /* - * If a BLK allocation overlaps the start of - * PMEM the entire interleave set may now only - * be used for BLK. 
- */ - blk_start = map_start; - } else - busy += end - start + 1; + busy += end - start + 1; } else if (map_start > start && map_start < end) { /* total eclipse of the mapping */ busy += nd_mapping->size; - blk_start = map_start; } } - *overlap = map_end + 1 - blk_start; - available = blk_start - map_start; - if (busy < available) - return ALIGN_DOWN(available - busy, align); - return 0; - - err: - nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason); + if (busy < nd_mapping->size) + return ALIGN_DOWN(nd_mapping->size - busy, align); return 0; } @@ -999,7 +823,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd, /** * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id * @nvdimm: container of dpa-resource-root + labels - * @label_id: dpa resource name of the form {pmem|blk}- + * @label_id: dpa resource name of the form pmem- */ resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, struct nd_label_id *label_id) diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 8c972bcb2ac3..082253a3a956 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -334,8 +334,7 @@ char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid, { if (!label_id || !uuid) return NULL; - snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb", - flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid); + snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid); return label_id->id; } @@ -406,7 +405,6 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) return 0; /* no label, nothing to reserve */ for_each_clear_bit_le(slot, free, nslot) { - struct nvdimm *nvdimm = to_nvdimm(ndd->dev); struct nd_namespace_label *nd_label; struct nd_region *nd_region = NULL; struct nd_label_id label_id; @@ -421,8 +419,6 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) nsl_get_uuid(ndd, nd_label, &label_uuid); flags = nsl_get_flags(ndd, nd_label); - if (test_bit(NDD_NOBLK, &nvdimm->flags)) - flags &= ~NSLABEL_FLAG_LOCAL; nd_label_gen_id(&label_id, &label_uuid, flags); res = nvdimm_allocate_dpa(ndd, &label_id, nsl_get_dpa(ndd, nd_label), diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h index 198ef1df298b..0650fb4b9821 100644 --- a/drivers/nvdimm/label.h +++ b/drivers/nvdimm/label.h @@ -193,7 +193,7 @@ struct nd_namespace_label { /** * struct nd_label_id - identifier string for dpa allocation - * @id: "{blk|pmem}-" + * @id: "pmem-" */ struct nd_label_id { char id[ND_LABEL_ID_SIZE]; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index d1c190b02657..62b83b2e26e3 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -297,13 +297,11 @@ static int scan_free(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_label_id *label_id, resource_size_t n) { - bool is_blk = strncmp(label_id->id, "blk", 3) == 0; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); int rc = 0; while (n) { struct resource *res, *last; - resource_size_t new_start; last = NULL; for_each_dpa_resource(ndd, res) @@ -321,16 +319,7 @@ static int scan_free(struct nd_region *nd_region, continue; } - /* - * Keep BLK allocations relegated to high DPA as much as - * possible - */ - if (is_blk) - new_start = res->start + n; - else - new_start = res->start; - - rc = adjust_resource(res, new_start, resource_size(res) - n); + rc = adjust_resource(res, res->start, resource_size(res) - n); if (rc == 0) res->flags |= DPA_RESOURCE_ADJUSTED; nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc); @@ -372,20 +361,12 @@ static 
resource_size_t init_dpa_allocation(struct nd_label_id *label_id, struct nd_region *nd_region, struct nd_mapping *nd_mapping, resource_size_t n) { - bool is_blk = strncmp(label_id->id, "blk", 3) == 0; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - resource_size_t first_dpa; struct resource *res; int rc = 0; - /* allocate blk from highest dpa first */ - if (is_blk) - first_dpa = nd_mapping->start + nd_mapping->size - n; - else - first_dpa = nd_mapping->start; - /* first resource allocation for this label-id or dimm */ - res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n); + res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n); if (!res) rc = -EBUSY; @@ -416,7 +397,6 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, resource_size_t n, struct resource *valid) { bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; unsigned long align; align = nd_region->align / nd_region->ndr_mappings; @@ -429,21 +409,6 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, if (is_reserve) return; - if (!is_pmem) { - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nvdimm_bus *nvdimm_bus; - struct blk_alloc_info info = { - .nd_mapping = nd_mapping, - .available = nd_mapping->size, - .res = valid, - }; - - WARN_ON(!is_nd_blk(&nd_region->dev)); - nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); - return; - } - /* allocation needs to be contiguous, so this is all or nothing */ if (resource_size(valid) < n) goto invalid; @@ -471,7 +436,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, resource_size_t n) { resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct resource *res, *exist = NULL, valid; const resource_size_t to_allocate = n; @@ -569,10 +533,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, } if (strcmp(action, "allocate") == 0) { - /* BLK allocate bottom up */ - if (!is_pmem) - valid.start += available - allocate; - new_res = nvdimm_allocate_dpa(ndd, label_id, valid.start, allocate); if (!new_res) @@ -608,12 +568,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, return 0; } - /* - * If we allocated nothing in the BLK case it may be because we are in - * an initial "pmem-reserve pass". Only do an initial BLK allocation - * when none of the DPA space is reserved. 
- */ - if ((is_pmem || !ndd->dpa.child) && n == to_allocate) + if (n == to_allocate) return init_dpa_allocation(label_id, nd_region, nd_mapping, n); return n; } @@ -672,7 +627,7 @@ int __reserve_free_pmem(struct device *dev, void *data) if (nd_mapping->nvdimm != nvdimm) continue; - n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem); + n = nd_pmem_available_dpa(nd_region, nd_mapping); if (n == 0) return 0; rem = scan_allocate(nd_region, nd_mapping, &label_id, n); @@ -697,19 +652,6 @@ void release_free_pmem(struct nvdimm_bus *nvdimm_bus, nvdimm_free_dpa(ndd, res); } -static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus, - struct nd_mapping *nd_mapping) -{ - struct nvdimm *nvdimm = nd_mapping->nvdimm; - int rc; - - rc = device_for_each_child(&nvdimm_bus->dev, nvdimm, - __reserve_free_pmem); - if (rc) - release_free_pmem(nvdimm_bus, nd_mapping); - return rc; -} - /** * grow_dpa_allocation - for each dimm allocate n bytes for @label_id * @nd_region: the set of dimms to allocate @n more bytes from @@ -726,37 +668,14 @@ static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus, static int grow_dpa_allocation(struct nd_region *nd_region, struct nd_label_id *label_id, resource_size_t n) { - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; int i; for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; resource_size_t rem = n; - int rc, j; - - /* - * In the BLK case try once with all unallocated PMEM - * reserved, and once without - */ - for (j = is_pmem; j < 2; j++) { - bool blk_only = j == 0; - - if (blk_only) { - rc = reserve_free_pmem(nvdimm_bus, nd_mapping); - if (rc) - return rc; - } - rem = scan_allocate(nd_region, nd_mapping, - label_id, rem); - if (blk_only) - release_free_pmem(nvdimm_bus, nd_mapping); - - /* try again and allow encroachments into PMEM */ - if (rem == 0) - break; - } + int rc; + rem = scan_allocate(nd_region, nd_mapping, label_id, rem); dev_WARN_ONCE(&nd_region->dev, rem, "allocation underrun: %#llx of %#llx bytes\n", (unsigned long long) n - rem, @@ -869,8 +788,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) ndd = to_ndd(nd_mapping); /* - * All dimms in an interleave set, or the base dimm for a blk - * region, need to be enabled for the size to be changed. + * All dimms in an interleave set, need to be enabled + * for the size to be changed. */ if (!ndd) return -ENXIO; @@ -1169,9 +1088,6 @@ static ssize_t resource_show(struct device *dev, } static DEVICE_ATTR_ADMIN_RO(resource); -static const unsigned long blk_lbasize_supported[] = { 512, 520, 528, - 4096, 4104, 4160, 4224, 0 }; - static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 }; static ssize_t sector_size_show(struct device *dev, @@ -1823,10 +1739,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, /* * Fix up each mapping's 'labels' to have the validated pmem label for * that position at labels[0], and NULL at labels[1]. In the process, - * check that the namespace aligns with interleave-set. We know - * that it does not overlap with any blk namespaces by virtue of - * the dimm being enabled (i.e. nd_label_reserve_dpa() - * succeeded). + * check that the namespace aligns with interleave-set. 
*/ nsl_get_uuid(ndd, nd_label, &uuid); rc = select_pmem_id(nd_region, &uuid); @@ -1931,8 +1844,7 @@ void nd_region_create_ns_seed(struct nd_region *nd_region) * disabled until memory becomes available */ if (!nd_region->ns_seed) - dev_err(&nd_region->dev, "failed to create %s namespace\n", - is_nd_blk(&nd_region->dev) ? "blk" : "pmem"); + dev_err(&nd_region->dev, "failed to create namespace\n"); else nd_device_register(nd_region->ns_seed); } @@ -2028,16 +1940,9 @@ static struct device **scan_labels(struct nd_region *nd_region) list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; struct device **__devs; - u32 flags; if (!nd_label) continue; - flags = nsl_get_flags(ndd, nd_label); - if (is_nd_blk(&nd_region->dev) - == !!(flags & NSLABEL_FLAG_LOCAL)) - /* pass, region matches label type */; - else - continue; /* skip labels that describe extents outside of the region */ if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start || @@ -2073,9 +1978,8 @@ static struct device **scan_labels(struct nd_region *nd_region) } - dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n", - count, is_nd_blk(&nd_region->dev) - ? "blk" : "pmem", count == 1 ? "" : "s"); + dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, + count == 1 ? "" : "s"); if (count == 0) { struct nd_namespace_pmem *nspm; @@ -2226,12 +2130,6 @@ static int init_active_labels(struct nd_region *nd_region) if (!label_ent) break; label = nd_label_active(ndd, j); - if (test_bit(NDD_NOBLK, &nvdimm->flags)) { - u32 flags = nsl_get_flags(ndd, label); - - flags &= ~NSLABEL_FLAG_LOCAL; - nsl_set_flags(ndd, label, flags); - } label_ent->label = label; mutex_lock(&nd_mapping->lock); @@ -2275,7 +2173,6 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) devs = create_namespace_io(nd_region); break; case ND_DEVICE_NAMESPACE_PMEM: - case ND_DEVICE_NAMESPACE_BLK: devs = create_namespaces(nd_region); break; default: diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index e4af0719cf33..17febf9ac911 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -82,30 +82,12 @@ static inline void nvdimm_security_overwrite_query(struct work_struct *work) } #endif -/** - * struct blk_alloc_info - tracking info for BLK dpa scanning - * @nd_mapping: blk region mapping boundaries - * @available: decremented in alias_dpa_busy as aliased PMEM is scanned - * @busy: decremented in blk_dpa_busy to account for ranges already - * handled by alias_dpa_busy - * @res: alias_dpa_busy interprets this a free space range that needs to - * be truncated to the valid BLK allocation starting DPA, blk_dpa_busy - * treats it as a busy range that needs the aliased PMEM ranges - * truncated. 
- */ -struct blk_alloc_info { - struct nd_mapping *nd_mapping; - resource_size_t available, busy; - struct resource *res; -}; - bool is_nvdimm(struct device *dev); bool is_nd_pmem(struct device *dev); bool is_nd_volatile(struct device *dev); -bool is_nd_blk(struct device *dev); static inline bool is_nd_region(struct device *dev) { - return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev); + return is_nd_pmem(dev) || is_nd_volatile(dev); } static inline bool is_memory(struct device *dev) { @@ -142,14 +124,12 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, struct nd_mapping *nd_mapping); resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region); resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, - struct nd_mapping *nd_mapping, resource_size_t *overlap); -resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); + struct nd_mapping *nd_mapping); resource_size_t nd_region_available_dpa(struct nd_region *nd_region); int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, resource_size_t size); resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, struct nd_label_id *label_id); -int alias_dpa_busy(struct device *dev, void *data); int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd); void get_ndd(struct nvdimm_drvdata *ndd); resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 8391bf2729bc..ec5219680092 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -295,9 +295,6 @@ static inline const u8 *nsl_uuid_raw(struct nvdimm_drvdata *ndd, return nd_label->efi.uuid; } -bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, - struct nd_namespace_label *nd_label, - u64 isetcookie); bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label, guid_t *guid); enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd, @@ -437,14 +434,6 @@ static inline bool nsl_validate_nlabel(struct nd_region *nd_region, return nsl_get_nlabel(ndd, nd_label) == nd_region->ndr_mappings; } -struct nd_blk_region { - int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); - int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, - void *iobuf, u64 len, int rw); - void *blk_provider_data; - struct nd_region nd_region; -}; - /* * Lookup next in the repeating sequence of 01, 10, and 11. 
*/ @@ -672,7 +661,6 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, return -ENXIO; } #endif -int nd_blk_region_init(struct nd_region *nd_region); int nd_region_activate(struct nd_region *nd_region); static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len) diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index e0c34120df37..188560b1c110 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -15,6 +15,10 @@ static int nd_region_probe(struct device *dev) static unsigned long once; struct nd_region_data *ndrd; struct nd_region *nd_region = to_nd_region(dev); + struct range range = { + .start = nd_region->ndr_start, + .end = nd_region->ndr_start + nd_region->ndr_size - 1, + }; if (nd_region->num_lanes > num_online_cpus() && nd_region->num_lanes < num_possible_cpus() @@ -30,25 +34,13 @@ static int nd_region_probe(struct device *dev) if (rc) return rc; - rc = nd_blk_region_init(nd_region); - if (rc) - return rc; - - if (is_memory(&nd_region->dev)) { - struct range range = { - .start = nd_region->ndr_start, - .end = nd_region->ndr_start + nd_region->ndr_size - 1, - }; - - if (devm_init_badblocks(dev, &nd_region->bb)) - return -ENODEV; - nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd, - "badblocks"); - if (!nd_region->bb_state) - dev_warn(&nd_region->dev, - "'badblocks' notification disabled\n"); - nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range); - } + if (devm_init_badblocks(dev, &nd_region->bb)) + return -ENODEV; + nd_region->bb_state = + sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks"); + if (!nd_region->bb_state) + dev_warn(dev, "'badblocks' notification disabled\n"); + nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range); rc = nd_region_register_namespaces(nd_region, &err); if (rc < 0) @@ -158,4 +150,3 @@ void nd_region_exit(void) } MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM); -MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 70ad891a76ba..0cb274c2b508 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -134,10 +134,7 @@ static void nd_region_release(struct device *dev) } free_percpu(nd_region->lane); memregion_free(nd_region->id); - if (is_nd_blk(dev)) - kfree(to_nd_blk_region(dev)); - else - kfree(nd_region); + kfree(nd_region); } struct nd_region *to_nd_region(struct device *dev) @@ -157,33 +154,12 @@ struct device *nd_region_dev(struct nd_region *nd_region) } EXPORT_SYMBOL_GPL(nd_region_dev); -struct nd_blk_region *to_nd_blk_region(struct device *dev) -{ - struct nd_region *nd_region = to_nd_region(dev); - - WARN_ON(!is_nd_blk(dev)); - return container_of(nd_region, struct nd_blk_region, nd_region); -} -EXPORT_SYMBOL_GPL(to_nd_blk_region); - void *nd_region_provider_data(struct nd_region *nd_region) { return nd_region->provider_data; } EXPORT_SYMBOL_GPL(nd_region_provider_data); -void *nd_blk_region_provider_data(struct nd_blk_region *ndbr) -{ - return ndbr->blk_provider_data; -} -EXPORT_SYMBOL_GPL(nd_blk_region_provider_data); - -void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data) -{ - ndbr->blk_provider_data = data; -} -EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data); - /** * nd_region_to_nstype() - region to an integer namespace type * @nd_region: region-device to interrogate @@ -208,8 +184,6 @@ int nd_region_to_nstype(struct nd_region *nd_region) return ND_DEVICE_NAMESPACE_PMEM; else return ND_DEVICE_NAMESPACE_IO; - } else if 
(is_nd_blk(&nd_region->dev)) { - return ND_DEVICE_NAMESPACE_BLK; } return 0; @@ -332,14 +306,12 @@ static DEVICE_ATTR_RO(set_cookie); resource_size_t nd_region_available_dpa(struct nd_region *nd_region) { - resource_size_t blk_max_overlap = 0, available, overlap; + resource_size_t available; int i; WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); - retry: available = 0; - overlap = blk_max_overlap; for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); @@ -348,15 +320,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) if (!ndd) return 0; - if (is_memory(&nd_region->dev)) { - available += nd_pmem_available_dpa(nd_region, - nd_mapping, &overlap); - if (overlap > blk_max_overlap) { - blk_max_overlap = overlap; - goto retry; - } - } else if (is_nd_blk(&nd_region->dev)) - available += nd_blk_available_dpa(nd_region); + available += nd_pmem_available_dpa(nd_region, nd_mapping); } return available; @@ -364,26 +328,17 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region) { - resource_size_t available = 0; + resource_size_t avail = 0; int i; - if (is_memory(&nd_region->dev)) - available = PHYS_ADDR_MAX; - WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; - if (is_memory(&nd_region->dev)) - available = min(available, - nd_pmem_max_contiguous_dpa(nd_region, - nd_mapping)); - else if (is_nd_blk(&nd_region->dev)) - available += nd_blk_available_dpa(nd_region); + avail = min_not_zero(avail, nd_pmem_max_contiguous_dpa( + nd_region, nd_mapping)); } - if (is_memory(&nd_region->dev)) - return available * nd_region->ndr_mappings; - return available; + return avail * nd_region->ndr_mappings; } static ssize_t available_size_show(struct device *dev, @@ -693,9 +648,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) && a != &dev_attr_available_size.attr) return a->mode; - if ((type == ND_DEVICE_NAMESPACE_PMEM - || type == ND_DEVICE_NAMESPACE_BLK) - && a == &dev_attr_available_size.attr) + if (type == ND_DEVICE_NAMESPACE_PMEM && + a == &dev_attr_available_size.attr) return a->mode; else if (is_memory(dev) && nd_set) return a->mode; @@ -828,12 +782,6 @@ static const struct attribute_group *nd_region_attribute_groups[] = { NULL, }; -static const struct device_type nd_blk_device_type = { - .name = "nd_blk", - .release = nd_region_release, - .groups = nd_region_attribute_groups, -}; - static const struct device_type nd_pmem_device_type = { .name = "nd_pmem", .release = nd_region_release, @@ -851,11 +799,6 @@ bool is_nd_pmem(struct device *dev) return dev ? dev->type == &nd_pmem_device_type : false; } -bool is_nd_blk(struct device *dev) -{ - return dev ? dev->type == &nd_blk_device_type : false; -} - bool is_nd_volatile(struct device *dev) { return dev ? 
dev->type == &nd_volatile_device_type : false; @@ -929,22 +872,6 @@ void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) nvdimm_bus_unlock(dev); } -int nd_blk_region_init(struct nd_region *nd_region) -{ - struct device *dev = &nd_region->dev; - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - - if (!is_nd_blk(dev)) - return 0; - - if (nd_region->ndr_mappings < 1) { - dev_dbg(dev, "invalid BLK region\n"); - return -ENXIO; - } - - return to_nd_blk_region(dev)->enable(nvdimm_bus, dev); -} - /** * nd_region_acquire_lane - allocate and lock a lane * @nd_region: region id and number of lanes possible @@ -1007,24 +934,10 @@ EXPORT_SYMBOL(nd_region_release_lane); static unsigned long default_align(struct nd_region *nd_region) { unsigned long align; - int i, mappings; u32 remainder; + int mappings; - if (is_nd_blk(&nd_region->dev)) - align = PAGE_SIZE; - else - align = MEMREMAP_COMPAT_ALIGN_MAX; - - for (i = 0; i < nd_region->ndr_mappings; i++) { - struct nd_mapping *nd_mapping = &nd_region->mapping[i]; - struct nvdimm *nvdimm = nd_mapping->nvdimm; - - if (test_bit(NDD_ALIASING, &nvdimm->flags)) { - align = MEMREMAP_COMPAT_ALIGN_MAX; - break; - } - } - + align = MEMREMAP_COMPAT_ALIGN_MAX; if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX) align = PAGE_SIZE; @@ -1042,7 +955,6 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, { struct nd_region *nd_region; struct device *dev; - void *region_buf; unsigned int i; int ro = 0; @@ -1060,36 +972,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, if (test_bit(NDD_UNARMED, &nvdimm->flags)) ro = 1; - if (test_bit(NDD_NOBLK, &nvdimm->flags) - && dev_type == &nd_blk_device_type) { - dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n", - caller, dev_name(&nvdimm->dev), i); - return NULL; - } } - if (dev_type == &nd_blk_device_type) { - struct nd_blk_region_desc *ndbr_desc; - struct nd_blk_region *ndbr; - - ndbr_desc = to_blk_region_desc(ndr_desc); - ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping) - * ndr_desc->num_mappings, - GFP_KERNEL); - if (ndbr) { - nd_region = &ndbr->nd_region; - ndbr->enable = ndbr_desc->enable; - ndbr->do_io = ndbr_desc->do_io; - } - region_buf = ndbr; - } else { - nd_region = kzalloc(struct_size(nd_region, mapping, - ndr_desc->num_mappings), - GFP_KERNEL); - region_buf = nd_region; - } + nd_region = + kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings), + GFP_KERNEL); - if (!region_buf) + if (!nd_region) return NULL; nd_region->id = memregion_alloc(GFP_KERNEL); if (nd_region->id < 0) @@ -1153,7 +1042,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, err_percpu: memregion_free(nd_region->id); err_id: - kfree(region_buf); + kfree(nd_region); return NULL; } @@ -1166,17 +1055,6 @@ struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus, } EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create); -struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus, - struct nd_region_desc *ndr_desc) -{ - if (ndr_desc->num_mappings > 1) - return NULL; - ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES); - return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type, - __func__); -} -EXPORT_SYMBOL_GPL(nvdimm_blk_region_create); - struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region_desc *ndr_desc) { @@ -1201,7 +1079,7 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio) } /** * nvdimm_flush - flush any posted 
write queues between the cpu and pmem media - * @nd_region: blk or interleaved pmem region + * @nd_region: interleaved pmem region */ int generic_nvdimm_flush(struct nd_region *nd_region) { @@ -1234,7 +1112,7 @@ EXPORT_SYMBOL_GPL(nvdimm_flush); /** * nvdimm_has_flush - determine write flushing requirements - * @nd_region: blk or interleaved pmem region + * @nd_region: interleaved pmem region * * Returns 1 if writes require flushing * Returns 0 if writes do not require flushing diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 7074aa9af525..0d61e07b6827 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -25,8 +25,6 @@ struct badrange { }; enum { - /* when a dimm supports both PMEM and BLK access a label is required */ - NDD_ALIASING = 0, /* unarmed memory devices may not persist writes */ NDD_UNARMED = 1, /* locked memory devices should not be accessed */ @@ -35,8 +33,6 @@ enum { NDD_SECURITY_OVERWRITE = 3, /* tracking whether or not there is a pending device reference */ NDD_WORK_PENDING = 4, - /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */ - NDD_NOBLK = 5, /* dimm supports namespace labels */ NDD_LABELING = 6, @@ -140,21 +136,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev, } struct nvdimm_bus; -struct module; -struct nd_blk_region; -struct nd_blk_region_desc { - int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); - int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, - void *iobuf, u64 len, int rw); - struct nd_region_desc ndr_desc; -}; - -static inline struct nd_blk_region_desc *to_blk_region_desc( - struct nd_region_desc *ndr_desc) -{ - return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc); - -} /* * Note that separate bits for locked + unlocked are defined so that @@ -257,7 +238,6 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm); struct nvdimm *to_nvdimm(struct device *dev); struct nd_region *to_nd_region(struct device *dev); struct device *nd_region_dev(struct nd_region *nd_region); -struct nd_blk_region *to_nd_blk_region(struct device *dev); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); const char *nvdimm_name(struct nvdimm *nvdimm); @@ -295,10 +275,6 @@ struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region_desc *ndr_desc); void *nd_region_provider_data(struct nd_region *nd_region); -void *nd_blk_region_provider_data(struct nd_blk_region *ndbr); -void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data); -struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); -unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr); unsigned int nd_region_acquire_lane(struct nd_region *nd_region); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); u64 nd_fletcher64(void *addr, size_t len, bool le); diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 8cf1e4884fd5..17e02b64ea2e 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -189,7 +189,6 @@ static inline const char *nvdimm_cmd_name(unsigned cmd) #define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */ #define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */ #define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */ -#define ND_DEVICE_NAMESPACE_BLK 6 /* 
BLK namespace (may alias with PMEM) */ #define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */ enum nd_driver_flags { @@ -198,7 +197,6 @@ enum nd_driver_flags { ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK, ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO, ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM, - ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK, ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM, }; diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c index 3ca7c32e9362..4d1a947367f9 100644 --- a/tools/testing/nvdimm/test/ndtest.c +++ b/tools/testing/nvdimm/test/ndtest.c @@ -338,62 +338,6 @@ static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc, return 0; } -static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, - void *iobuf, u64 len, int rw) -{ - struct ndtest_dimm *dimm = ndbr->blk_provider_data; - struct ndtest_blk_mmio *mmio = dimm->mmio; - struct nd_region *nd_region = &ndbr->nd_region; - unsigned int lane; - - if (!mmio) - return -ENOMEM; - - lane = nd_region_acquire_lane(nd_region); - if (rw) - memcpy(mmio->base + dpa, iobuf, len); - else { - memcpy(iobuf, mmio->base + dpa, len); - arch_invalidate_pmem(mmio->base + dpa, len); - } - - nd_region_release_lane(nd_region, lane); - - return 0; -} - -static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus, - struct device *dev) -{ - struct nd_blk_region *ndbr = to_nd_blk_region(dev); - struct nvdimm *nvdimm; - struct ndtest_dimm *dimm; - struct ndtest_blk_mmio *mmio; - - nvdimm = nd_blk_region_to_dimm(ndbr); - dimm = nvdimm_provider_data(nvdimm); - - nd_blk_region_set_provider_data(ndbr, dimm); - dimm->blk_region = to_nd_region(dev); - - mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL); - if (!mmio) - return -ENOMEM; - - mmio->base = (void __iomem *) devm_nvdimm_memremap( - dev, dimm->address, 12, nd_blk_memremap_flags(ndbr)); - if (!mmio->base) { - dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm)); - return -ENOMEM; - } - mmio->size = dimm->size; - mmio->base_offset = 0; - - dimm->mmio = mmio; - - return 0; -} - static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr) { int i; @@ -523,17 +467,16 @@ static int ndtest_create_region(struct ndtest_priv *p, struct ndtest_region *region) { struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING]; - struct nd_blk_region_desc ndbr_desc; + struct nd_region_desc *ndr_desc, _ndr_desc; struct nd_interleave_set *nd_set; - struct nd_region_desc *ndr_desc; struct resource res; int i, ndimm = region->mapping[0].dimm; u64 uuid[2]; memset(&res, 0, sizeof(res)); memset(&mappings, 0, sizeof(mappings)); - memset(&ndbr_desc, 0, sizeof(ndbr_desc)); - ndr_desc = &ndbr_desc.ndr_desc; + memset(&_ndr_desc, 0, sizeof(_ndr_desc)); + ndr_desc = &_ndr_desc; if (!ndtest_alloc_resource(p, region->size, &res.start)) return -ENOMEM; @@ -857,10 +800,8 @@ static int ndtest_dimm_register(struct ndtest_priv *priv, struct device *dev = &priv->pdev.dev; unsigned long dimm_flags = dimm->flags; - if (dimm->num_formats > 1) { - set_bit(NDD_ALIASING, &dimm_flags); + if (dimm->num_formats > 1) set_bit(NDD_LABELING, &dimm_flags); - } if (dimm->flags & PAPR_PMEM_UNARMED_MASK) set_bit(NDD_UNARMED, &dimm_flags); -- cgit v1.2.3-71-gd317 From c97448437847bd76116b3a077e44808e946bb1ae Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Fri, 25 Feb 2022 15:35:29 +0100 Subject: clk: Add clk_drop_range In order to reset the range on a clock, we need to call clk_set_rate_range with a minimum 
of 0 and a maximum of ULONG_MAX. Since it's fairly inconvenient, let's introduce a clk_drop_range() function that will do just this. Suggested-by: Stephen Boyd Signed-off-by: Maxime Ripard Link: https://lore.kernel.org/r/20220225143534.405820-8-maxime@cerno.tech Signed-off-by: Stephen Boyd --- drivers/clk/clk_test.c | 4 ++-- include/linux/clk.h | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c index 8924a5ac56d8..48f95d1784c8 100644 --- a/drivers/clk/clk_test.c +++ b/drivers/clk/clk_test.c @@ -645,7 +645,7 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test) KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1); KUNIT_ASSERT_EQ(test, - clk_set_rate_range(user2, 0, ULONG_MAX), + clk_drop_range(user2), 0); rate = clk_get_rate(clk); @@ -762,7 +762,7 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test) KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2); KUNIT_ASSERT_EQ(test, - clk_set_rate_range(user2, 0, ULONG_MAX), + clk_drop_range(user2), 0); rate = clk_get_rate(clk); diff --git a/include/linux/clk.h b/include/linux/clk.h index 266e8de3cb51..39faa54efe88 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -986,6 +986,17 @@ static inline void clk_bulk_disable_unprepare(int num_clks, clk_bulk_unprepare(num_clks, clks); } +/** + * clk_drop_range - Reset any range set on that clock + * @clk: clock source + * + * Returns success (0) or negative errno. + */ +static inline int clk_drop_range(struct clk *clk) +{ + return clk_set_rate_range(clk, 0, ULONG_MAX); +} + /** * clk_get_optional - lookup and obtain a reference to an optional clock * producer. -- cgit v1.2.3-71-gd317 From d3826a95222c44a527f76b011bb5af8c924632e9 Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Fri, 11 Mar 2022 11:43:06 +0100 Subject: nfp: add support for NFP3800/NFP3803 PCIe devices Enable binding the nfp driver to NFP3800 and NFP3803 devices. The PCIE_SRAM offset is different for the NFP3800 device, which also only supports a single explicit group. Changes to Dirk's work: * 48-bit dma addressing is not ready yet. Keep 40-bit dma addressing for NFP3800. 
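For reference, DMA_BIT_MASK(n) in mainline expands to the low n bits set (all-ones for n == 64), so the DMA_BIT_MASK(40) entries added below advertise a 1 TiB addressable window. A minimal standalone sketch of the same computation in plain C — main() and the printout are illustrative only; the macro body mirrors <linux/dma-mapping.h>:

#include <stdio.h>
#include <inttypes.h>

/* Same shape as the kernel's DMA_BIT_MASK():
 * all-ones for n == 64, otherwise the low n bits set.
 */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* prints 0xffffffffff: 2^40 - 1, i.e. a 1 TiB window */
	printf("DMA_BIT_MASK(40) = %#" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(40));
	return 0;
}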
Signed-off-by: Dirk van der Merwe Signed-off-by: Jakub Kicinski Signed-off-by: Fei Qin Signed-off-by: Simon Horman Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 4 ++++ drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 4 ++++ drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 4 ---- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c | 19 +++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h | 2 ++ include/linux/pci_ids.h | 2 ++ 6 files changed, 31 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index dd135ac8b1a3..8693f9905fbe 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -33,6 +33,10 @@ static const char nfp_driver_name[] = "nfp"; static const struct pci_device_id nfp_pci_device_ids[] = { + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, NFP_DEV_NFP3800, + }, { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, NFP_DEV_NFP6000, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index db4301f8cd85..9ef226c6706e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -38,6 +38,10 @@ struct nfp_net_vf { static const char nfp_net_driver_name[] = "nfp_netvf"; static const struct pci_device_id nfp_netvf_pci_device_ids[] = { + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800_VF, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF, + }, { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 2dd0f5842873..3d379e937184 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -32,10 +32,6 @@ #define PCI_64BIT_BAR_COUNT 3 -/* NFP hardware vendor/device ids. 
- */ -#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800 - #define NFP_CPP_NUM_TARGETS 16 /* Max size of area it should be safe to request */ #define NFP_CPP_SAFE_AREA_SIZE SZ_2M diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c index 0c1ef01f90eb..28384d6d1c6f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c @@ -8,6 +8,25 @@ #include "nfp_dev.h" const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { + [NFP_DEV_NFP3800] = { + .dma_mask = DMA_BIT_MASK(40), + .qc_idx_mask = GENMASK(8, 0), + .qc_addr_offset = 0x400000, + .min_qc_size = 512, + .max_qc_size = SZ_64K, + + .chip_names = "NFP3800", + .pcie_cfg_expbar_offset = 0x0a00, + .pcie_expl_offset = 0xd000, + .qc_area_sz = 0x100000, + }, + [NFP_DEV_NFP3800_VF] = { + .dma_mask = DMA_BIT_MASK(40), + .qc_idx_mask = GENMASK(8, 0), + .qc_addr_offset = 0, + .min_qc_size = 512, + .max_qc_size = SZ_64K, + }, [NFP_DEV_NFP6000] = { .dma_mask = DMA_BIT_MASK(40), .qc_idx_mask = GENMASK(7, 0), diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h index deadd9b97f9f..d4189869cf7b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h @@ -7,6 +7,8 @@ #include enum nfp_dev_id { + NFP_DEV_NFP3800, + NFP_DEV_NFP3800_VF, NFP_DEV_NFP6000, NFP_DEV_NFP6000_VF, NFP_DEV_CNT, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c7e6f2043c7d..5462c29f0538 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2531,9 +2531,11 @@ #define PCI_VENDOR_ID_HUAWEI 0x19e5 #define PCI_VENDOR_ID_NETRONOME 0x19ee +#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800 #define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 #define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000 #define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000 +#define PCI_DEVICE_ID_NETRONOME_NFP3800_VF 0x3803 #define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003 #define PCI_VENDOR_ID_QMI 0x1a32 -- cgit v1.2.3-71-gd317 From 625788b5844511cf4c30cffa7fa0bc3a69cebc82 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 10 Mar 2022 21:14:20 -0800 Subject: net: add per-cpu storage and net->core_stats Before adding yet another possibly contended atomic_long_t, it is time to add per-cpu storage for existing ones: dev->tx_dropped, dev->rx_dropped, and dev->rx_nohandler Because many devices do not have to increment such counters, allocate the per-cpu storage on demand, so that dev_get_stats() does not have to spend considerable time folding zero counters. Note that some drivers have abused these counters which were supposed to be only used by core networking stack. 
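The allocation race in netdev_core_stats_alloc() below resolves via cmpxchg(): whichever CPU publishes its freshly allocated per-cpu block first wins, and the loser frees its copy. A rough userspace analogue of that publish-or-free pattern, using C11 atomics and a single shared block instead of alloc_percpu() — the names here are invented for the sketch, not the kernel API:

#include <stdatomic.h>
#include <stdlib.h>

struct core_stats {
	long rx_dropped;
	long tx_dropped;
	long rx_nohandler;
};

static _Atomic(struct core_stats *) core_stats_ptr;

/* Allocate the stats block lazily, on the first increment. */
static struct core_stats *get_core_stats(void)
{
	struct core_stats *p = atomic_load(&core_stats_ptr);
	struct core_stats *expected = NULL;

	if (p)			/* fast path: already published */
		return p;

	p = calloc(1, sizeof(*p));
	if (!p)			/* best effort, like GFP_ATOMIC | __GFP_NOWARN */
		return NULL;

	if (!atomic_compare_exchange_strong(&core_stats_ptr, &expected, p)) {
		free(p);	/* another thread won the race; use its block */
		p = expected;
	}
	return p;
}

int main(void)
{
	struct core_stats *s = get_core_stats();

	if (s)	/* the kernel instead does local_inc() on a per-CPU slot */
		s->rx_dropped++;
	return 0;
}

The real patch keeps one block per CPU so increments need no atomic ops at all; the sketch collapses that to one shared block purely to keep the allocation race visible.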
v4: should use per_cpu_ptr() in dev_get_stats() (Jakub) v3: added a READ_ONCE() in netdev_core_stats_alloc() (Paolo) v2: add a missing include (reported by kernel test robot ) Change in netdev_core_stats_alloc() (Jakub) Signed-off-by: Eric Dumazet Cc: jeffreyji Reviewed-by: Brian Vazquez Reviewed-by: Jakub Kicinski Acked-by: Paolo Abeni Link: https://lore.kernel.org/r/20220311051420.2608812-1-eric.dumazet@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/bonding/bond_main.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 +- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4 +- .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 2 +- drivers/net/ipvlan/ipvlan_core.c | 2 +- drivers/net/macvlan.c | 2 +- drivers/net/net_failover.c | 2 +- drivers/net/tun.c | 16 +++---- drivers/net/vxlan/vxlan_core.c | 2 +- include/linux/netdevice.h | 46 +++++++++++++++---- include/net/bonding.h | 2 +- net/core/dev.c | 51 ++++++++++++++++++---- net/core/gro_cells.c | 2 +- net/hsr/hsr_device.c | 2 +- net/xfrm/xfrm_device.c | 2 +- 15 files changed, 101 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 55e0ba2a163d..15eddca7b4b6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5120,7 +5120,7 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, if (xmit_suc) return NETDEV_TX_OK; - atomic_long_inc(&bond_dev->tx_dropped); + dev_core_stats_tx_dropped_inc(bond_dev); return NET_XMIT_DROP; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2de02950086f..92a1a43b3bee 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -370,7 +370,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) i = skb_get_queue_mapping(skb); if (unlikely(i >= bp->tx_nr_rings)) { dev_kfree_skb_any(skb); - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); return NETDEV_TX_OK; } @@ -646,7 +646,7 @@ tx_kick_pending: if (txr->kick_pending) bnxt_txr_db_kick(bp, txr, txr->tx_prod); txr->tx_buf_ring[txr->tx_prod].skb = NULL; - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index d7a27c244d48..54faf0f2d1d8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -887,8 +887,8 @@ static void hns_get_ethtool_stats(struct net_device *netdev, p[21] = net_stats->rx_compressed; p[22] = net_stats->tx_compressed; - p[23] = netdev->rx_dropped.counter; - p[24] = netdev->tx_dropped.counter; + p[23] = 0; /* was netdev->rx_dropped.counter */ + p[24] = 0; /* was netdev->tx_dropped.counter */ p[25] = priv->tx_timeout_count; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index bfbd7847f946..a313242a762e 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -207,7 +207,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) dev = skb->dev; port = rmnet_get_port_rcu(dev); if (unlikely(!port)) { - atomic_long_inc(&skb->dev->rx_nohandler); + dev_core_stats_rx_nohandler_inc(skb->dev); kfree_skb(skb); goto done; } diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c 
index c613900c3811..6ffb27419e64 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -555,7 +555,7 @@ static void ipvlan_multicast_enqueue(struct ipvl_port *port, schedule_work(&port->wq); } else { spin_unlock(&port->backlog.lock); - atomic_long_inc(&skb->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(skb->dev); kfree_skb(skb); } } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 33753a2fde29..4b77819e9328 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -371,7 +371,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, free_nskb: kfree_skb(nskb); err: - atomic_long_inc(&skb->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(skb->dev); } static void macvlan_flush_sources(struct macvlan_port *port, diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 86ec5aae4289..21a0435c02de 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -89,7 +89,7 @@ static int net_failover_close(struct net_device *dev) static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb, struct net_device *dev) { - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2b9a22669a12..276a0e42ca8e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1135,7 +1135,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; drop: - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); skb_tx_error(skb); kfree_skb_reason(skb, drop_reason); rcu_read_unlock(); @@ -1291,7 +1291,7 @@ resample: void *frame = tun_xdp_to_ptr(xdp); if (__ptr_ring_produce(&tfile->tx_ring, frame)) { - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); break; } nxmit++; @@ -1626,7 +1626,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, trace_xdp_exception(tun->dev, xdp_prog, act); fallthrough; case XDP_DROP: - atomic_long_inc(&tun->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(tun->dev); break; } @@ -1797,7 +1797,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, */ skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); if (IS_ERR(skb)) { - atomic_long_inc(&tun->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(tun->dev); return PTR_ERR(skb); } if (!skb) @@ -1826,7 +1826,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EAGAIN) - atomic_long_inc(&tun->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(tun->dev); if (frags) mutex_unlock(&tfile->napi_mutex); return PTR_ERR(skb); @@ -1841,7 +1841,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, err = -EFAULT; drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; drop: - atomic_long_inc(&tun->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(tun->dev); kfree_skb_reason(skb, drop_reason); if (frags) { tfile->napi.skb = NULL; @@ -1876,7 +1876,7 @@ drop: pi.proto = htons(ETH_P_IPV6); break; default: - atomic_long_inc(&tun->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(tun->dev); kfree_skb(skb); return -EINVAL; } @@ -1956,7 +1956,7 @@ drop: skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { - atomic_long_inc(&tun->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(tun->dev); napi_free_frags(&tfile->napi); rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); diff --git 
a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 3872f76ea1d3..de97ff98d36e 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1760,7 +1760,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (unlikely(!(vxlan->dev->flags & IFF_UP))) { rcu_read_unlock(); - atomic_long_inc(&vxlan->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(vxlan->dev); vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX_DROPS, 0); goto drop; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index acd3cf69b61f..0d994710b335 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -194,6 +195,14 @@ struct net_device_stats { unsigned long tx_compressed; }; +/* per-cpu stats, allocated on demand. + * Try to fit them in a single cache line, for dev_get_stats() sake. + */ +struct net_device_core_stats { + local_t rx_dropped; + local_t tx_dropped; + local_t rx_nohandler; +} __aligned(4 * sizeof(local_t)); #include #include @@ -1735,12 +1744,8 @@ enum netdev_ml_priv_type { * @stats: Statistics struct, which was left as a legacy, use * rtnl_link_stats64 instead * - * @rx_dropped: Dropped packets by core network, - * do not use this in drivers - * @tx_dropped: Dropped packets by core network, + * @core_stats: core networking counters, * do not use this in drivers - * @rx_nohandler: nohandler dropped packets by core network on - * inactive devices, do not use this in drivers * @carrier_up_count: Number of times the carrier has been up * @carrier_down_count: Number of times the carrier has been down * @@ -2023,9 +2028,7 @@ struct net_device { struct net_device_stats stats; /* not used by modern drivers */ - atomic_long_t rx_dropped; - atomic_long_t tx_dropped; - atomic_long_t rx_nohandler; + struct net_device_core_stats __percpu *core_stats; /* Stats to monitor link on/off, flapping */ atomic_t carrier_up_count; @@ -3839,13 +3842,38 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev, return false; } +struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev); + +static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev) +{ + /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ + struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); + + if (likely(p)) + return this_cpu_ptr(p); + + return netdev_core_stats_alloc(dev); +} + +#define DEV_CORE_STATS_INC(FIELD) \ +static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ +{ \ + struct net_device_core_stats *p = dev_core_stats(dev); \ + \ + if (p) \ + local_inc(&p->FIELD); \ +} +DEV_CORE_STATS_INC(rx_dropped) +DEV_CORE_STATS_INC(tx_dropped) +DEV_CORE_STATS_INC(rx_nohandler) + static __always_inline int ____dev_forward_skb(struct net_device *dev, struct sk_buff *skb, const bool check_mtu) { if (skb_orphan_frags(skb, GFP_ATOMIC) || unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { - atomic_long_inc(&dev->rx_dropped); + dev_core_stats_rx_dropped_inc(dev); kfree_skb(skb); return NET_RX_DROP; } diff --git a/include/net/bonding.h b/include/net/bonding.h index d0dfe727e0b1..b14f4c0b4e9e 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -770,7 +770,7 @@ extern const struct sysfs_ops slave_sysfs_ops; static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb) { - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); 
dev_kfree_skb_any(skb); return NET_XMIT_DROP; } diff --git a/net/core/dev.c b/net/core/dev.c index 7ed27c178a1f..8d25ec5b3af7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3633,7 +3633,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device out_kfree_skb: kfree_skb(skb); out_null: - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); return NULL; } @@ -4184,7 +4184,7 @@ recursion_alert: rc = -ENETDOWN; rcu_read_unlock_bh(); - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); kfree_skb_list(skb); return rc; out: @@ -4236,7 +4236,7 @@ int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id) local_bh_enable(); return ret; drop: - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); kfree_skb_list(skb); return NET_XMIT_DROP; } @@ -4602,7 +4602,7 @@ drop: sd->dropped++; rps_unlock_irq_restore(sd, &flags); - atomic_long_inc(&skb->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(skb->dev); kfree_skb_reason(skb, reason); return NET_RX_DROP; } @@ -5357,10 +5357,10 @@ check_vlan_id: } else { drop: if (!deliver_exact) { - atomic_long_inc(&skb->dev->rx_dropped); + dev_core_stats_rx_dropped_inc(skb->dev); kfree_skb_reason(skb, SKB_DROP_REASON_PTYPE_ABSENT); } else { - atomic_long_inc(&skb->dev->rx_nohandler); + dev_core_stats_rx_nohandler_inc(skb->dev); kfree_skb(skb); } /* Jamal, now you will not able to escape explaining @@ -10280,6 +10280,25 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, } EXPORT_SYMBOL(netdev_stats_to_stats64); +struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev) +{ + struct net_device_core_stats __percpu *p; + + p = alloc_percpu_gfp(struct net_device_core_stats, + GFP_ATOMIC | __GFP_NOWARN); + + if (p && cmpxchg(&dev->core_stats, NULL, p)) + free_percpu(p); + + /* This READ_ONCE() pairs with the cmpxchg() above */ + p = READ_ONCE(dev->core_stats); + if (!p) + return NULL; + + return this_cpu_ptr(p); +} +EXPORT_SYMBOL(netdev_core_stats_alloc); + /** * dev_get_stats - get network device statistics * @dev: device to get statistics from @@ -10294,6 +10313,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, struct rtnl_link_stats64 *storage) { const struct net_device_ops *ops = dev->netdev_ops; + const struct net_device_core_stats __percpu *p; if (ops->ndo_get_stats64) { memset(storage, 0, sizeof(*storage)); @@ -10303,9 +10323,20 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, } else { netdev_stats_to_stats64(storage, &dev->stats); } - storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); - storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); - storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); + + /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ + p = READ_ONCE(dev->core_stats); + if (p) { + const struct net_device_core_stats *core_stats; + int i; + + for_each_possible_cpu(i) { + core_stats = per_cpu_ptr(p, i); + storage->rx_dropped += local_read(&core_stats->rx_dropped); + storage->tx_dropped += local_read(&core_stats->tx_dropped); + storage->rx_nohandler += local_read(&core_stats->rx_nohandler); + } + } return storage; } EXPORT_SYMBOL(dev_get_stats); @@ -10567,6 +10598,8 @@ void free_netdev(struct net_device *dev) free_percpu(dev->pcpu_refcnt); dev->pcpu_refcnt = NULL; #endif + free_percpu(dev->core_stats); + dev->core_stats = NULL; free_percpu(dev->xdp_bulkq); dev->xdp_bulkq = NULL; diff --git 
a/net/core/gro_cells.c b/net/core/gro_cells.c index 8462f926ab45..541c7a72a28a 100644 --- a/net/core/gro_cells.c +++ b/net/core/gro_cells.c @@ -28,7 +28,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) { drop: - atomic_long_inc(&dev->rx_dropped); + dev_core_stats_rx_dropped_inc(dev); kfree_skb(skb); res = NET_RX_DROP; goto unlock; diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 7f250216433d..6ffef47e9be5 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -221,7 +221,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) skb_reset_mac_len(skb); hsr_forward_skb(skb, master); } else { - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); dev_kfree_skb_any(skb); } return NETDEV_TX_OK; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 39bce5d764de..3e3448ada1bb 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -143,7 +143,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur segs = skb_gso_segment(skb, esp_features); if (IS_ERR(segs)) { kfree_skb(skb); - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); return NULL; } else { consume_skb(skb); -- cgit v1.2.3-71-gd317 From f2aa197e4794bf4c2c0c9570684f86e6fa103e8b Mon Sep 17 00:00:00 2001 From: Chengming Zhou Date: Sat, 5 Mar 2022 11:41:03 +0800 Subject: cgroup: Fix suspicious rcu_dereference_check() usage warning task_css_set_check() will use rcu_dereference_check() to check for rcu_read_lock_held() on the read-side, which is not true after commit dc6e0818bc9a ("sched/cpuacct: Optimize away RCU read lock"). This commit drop explicit rcu_read_lock(), change to RCU-sched read-side critical section. So fix the RCU warning by adding check for rcu_read_lock_sched_held(). Fixes: dc6e0818bc9a ("sched/cpuacct: Optimize away RCU read lock") Reported-by: Linux Kernel Functional Testing Reported-by: syzbot+16e3f2c77e7c5a0113f9@syzkaller.appspotmail.com Signed-off-by: Chengming Zhou Signed-off-by: Peter Zijlstra (Intel) Acked-by: Tejun Heo Tested-by: Zhouyi Zhou Tested-by: Marek Szyprowski Link: https://lore.kernel.org/r/20220305034103.57123-1-zhouchengming@bytedance.com --- include/linux/cgroup.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1e356c222756..0d1ada8968d7 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -450,6 +450,7 @@ extern struct mutex cgroup_mutex; extern spinlock_t css_set_lock; #define task_css_set_check(task, __c) \ rcu_dereference_check((task)->cgroups, \ + rcu_read_lock_sched_held() || \ lockdep_is_held(&cgroup_mutex) || \ lockdep_is_held(&css_set_lock) || \ ((task)->flags & PF_EXITING) || (__c)) -- cgit v1.2.3-71-gd317 From 6f98a4bfee72c22f50aedb39fb761567969865fe Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Mon, 7 Feb 2022 17:19:24 +0100 Subject: random: block in /dev/urandom This topic has come up countless times, and usually doesn't go anywhere. This time I thought I'd bring it up with a slightly narrower focus, updated for some developments over the last three years: we finally can make /dev/urandom always secure, in light of the fact that our RNG is now always seeded. 
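For readers mapping this to userspace: the interfaces being unified are all reachable through getrandom(2). A minimal sketch, assuming a glibc with the getrandom() wrapper (>= 2.25); the GRND_INSECURE fallback define is only for older headers:

#include <sys/types.h>
#include <sys/random.h>

#ifndef GRND_INSECURE
#define GRND_INSECURE 0x0004	/* from <linux/random.h>, linux >= 5.6 */
#endif

int main(void)
{
	unsigned char buf[32];

	/* flags=0 blocks until the pool is seeded, then never again;
	 * after this change, reading /dev/urandom behaves the same way.
	 */
	if (getrandom(buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		return 1;

	/* GRND_INSECURE never blocks and may return unseeded bytes;
	 * it is deliberately unaffected by this change.
	 */
	if (getrandom(buf, sizeof(buf), GRND_INSECURE) != (ssize_t)sizeof(buf))
		return 1;

	return 0;
}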
Ever since Linus' 50ee7529ec45 ("random: try to actively add entropy rather than passively wait for it"), the RNG does a haveged-style jitter dance around the scheduler, in order to produce entropy (and credit it) for the case when we're stuck in wait_for_random_bytes(). However you feel about the Linus Jitter Dance is beside the point: it's been there for three years and usually gets the RNG initialized in a second or so. As a matter of fact, this is what happens currently when people use getrandom(). It's already there and working, and most people have been using it for years without realizing. So, given that the kernel has grown this mechanism for seeding itself from nothing, and that this procedure happens pretty fast, maybe there's no point any longer in having /dev/urandom give insecure bytes. In the past we didn't want the boot process to deadlock, which was understandable. But now, in the worst case, a second goes by, and the problem is resolved. It seems like maybe we're finally at a point when we can get rid of the infamous "urandom read hole". The one slight drawback is that the Linus Jitter Dance relies on random_get_entropy() being implemented. The first lines of try_to_generate_entropy() are:

	stack.now = random_get_entropy();
	if (stack.now == random_get_entropy())
		return;

On most platforms, random_get_entropy() is simply aliased to get_cycles(). The number of machines without a cycle counter or some other implementation of random_get_entropy() in 2022, which can also run a mainline kernel, and at the same time have a both broken and out of date userspace that relies on /dev/urandom never blocking at boot is thought to be exceedingly low. And to be clear: those museum pieces without cycle counters will continue to run Linux just fine, and even /dev/urandom will be operable just like before; the RNG just needs to be seeded first through the usual means, which should already be the case now. On systems that really do want unseeded randomness, we already offer getrandom(GRND_INSECURE), which is in use by, e.g., systemd for seeding their hash tables at boot. Nothing in this commit would affect GRND_INSECURE, and it remains the means of getting those types of random numbers. This patch goes a long way toward eliminating a long overdue userspace crypto footgun. After several decades of endless user confusion, we will finally be able to say, "use any single one of our random interfaces and you'll be fine. They're all the same. It doesn't matter." And that, I think, is really something. Finally all of those blog posts and disagreeing forums and contradictory articles will all become correct about whatever they happened to recommend, and along with it, a whole class of vulnerabilities eliminated. With very minimal downside, we're finally in a position where we can make this change. Cc: Dinh Nguyen Cc: Nick Hu Cc: Max Filippov Cc: Palmer Dabbelt Cc: David S. Miller Cc: Yoshinori Sato Cc: Michal Simek Cc: Borislav Petkov Cc: Guo Ren Cc: Geert Uytterhoeven Cc: Joshua Kinard Cc: David Laight Cc: Dominik Brodowski Cc: Eric Biggers Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Thomas Gleixner Cc: Andy Lutomirski Cc: Kees Cook Cc: Lennart Poettering Cc: Konstantin Ryabitsev Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: Theodore Ts'o Signed-off-by: Jason A.
Donenfeld --- drivers/char/mem.c | 2 +- drivers/char/random.c | 72 ++++++++++++-------------------------------------- include/linux/random.h | 2 +- 3 files changed, 19 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/mem.c b/drivers/char/mem.c index cc296f0823bd..9f586025dbe6 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -707,7 +707,7 @@ static const struct memdev { [5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT }, [7] = { "full", 0666, &full_fops, 0 }, [8] = { "random", 0666, &random_fops, 0 }, - [9] = { "urandom", 0666, &urandom_fops, 0 }, + [9] = { "urandom", 0666, &random_fops, 0 }, #ifdef CONFIG_PRINTK [11] = { "kmsg", 0644, &kmsg_fops, 0 }, #endif diff --git a/drivers/char/random.c b/drivers/char/random.c index 8171c3bbf460..9831797e0699 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -89,17 +89,14 @@ static LIST_HEAD(random_ready_list); /* Control how we warn userspace. */ static struct ratelimit_state unseeded_warning = RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); -static struct ratelimit_state urandom_warning = - RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); static int ratelimit_disable __read_mostly; module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); /* * Returns whether or not the input pool has been seeded and thus guaranteed - * to supply cryptographically secure random numbers. This applies to: the - * /dev/urandom device, the get_random_bytes function, and the get_random_{u32, - * ,u64,int,long} family of functions. + * to supply cryptographically secure random numbers. This applies to + * get_random_bytes() and get_random_{u32,u64,int,long}(). * * Returns: true if the input pool has been seeded. * false if the input pool has not been seeded. @@ -115,10 +112,10 @@ static void try_to_generate_entropy(void); /* * Wait for the input pool to be seeded and thus guaranteed to supply - * cryptographically secure random numbers. This applies to: the /dev/urandom - * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} - * family of functions. Using any of these functions without first calling - * this function forfeits the guarantee of security. + * cryptographically secure random numbers. This applies to + * get_random_bytes() and get_random_{u32,u64,int,long}(). Using any + * of these functions without first calling this function means that + * the returned numbers might not be cryptographically secure. * * Returns: 0 if the input pool has been seeded. * -ERESTARTSYS if the function was interrupted by a signal. @@ -256,10 +253,10 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void * unsigned long get_random_long() * * These interfaces will return the requested number of random bytes - * into the given buffer or as a return value. This is equivalent to - * a read from /dev/urandom. The integer family of functions may be - * higher performance for one-off random integers, because they do a - * bit of buffering. + * into the given buffer or as a return value. The returned numbers are + * the same as those of getrandom(0). The integer family of functions may + * be higher performance for one-off random integers, because they do a + * bit of buffering and do not invoke reseeding. 
* *********************************************************************/ @@ -336,11 +333,6 @@ static void crng_reseed(void) unseeded_warning.missed); unseeded_warning.missed = 0; } - if (urandom_warning.missed) { - pr_notice("%d urandom warning(s) missed due to ratelimiting\n", - urandom_warning.missed); - urandom_warning.missed = 0; - } } } @@ -993,10 +985,8 @@ int __init rand_initialize(void) pr_notice("crng init done (trusting CPU's manufacturer)\n"); } - if (ratelimit_disable) { - urandom_warning.interval = 0; + if (ratelimit_disable) unseeded_warning.interval = 0; - } return 0; } @@ -1386,20 +1376,16 @@ static void try_to_generate_entropy(void) * getrandom(2) is the primary modern interface into the RNG and should * be used in preference to anything else. * - * Reading from /dev/random has the same functionality as calling - * getrandom(2) with flags=0. In earlier versions, however, it had - * vastly different semantics and should therefore be avoided, to - * prevent backwards compatibility issues. - * - * Reading from /dev/urandom has the same functionality as calling - * getrandom(2) with flags=GRND_INSECURE. Because it does not block - * waiting for the RNG to be ready, it should not be used. + * Reading from /dev/random and /dev/urandom both have the same effect + * as calling getrandom(2) with flags=0. (In earlier versions, however, + * they each had different semantics.) * * Writing to either /dev/random or /dev/urandom adds entropy to * the input pool but does not credit it. * - * Polling on /dev/random indicates when the RNG is initialized, on - * the read side, and when it wants new entropy, on the write side. + * Polling on /dev/random or /dev/urandom indicates when the RNG + * is initialized, on the read side, and when it wants new entropy, + * on the write side. * * Both /dev/random and /dev/urandom have the same set of ioctls for * adding entropy, getting the entropy count, zeroing the count, and @@ -1484,21 +1470,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer, return (ssize_t)count; } -static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) -{ - static int maxwarn = 10; - - if (!crng_ready() && maxwarn > 0) { - maxwarn--; - if (__ratelimit(&urandom_warning)) - pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", - current->comm, nbytes); - } - - return get_random_bytes_user(buf, nbytes); -} - static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { @@ -1585,15 +1556,6 @@ const struct file_operations random_fops = { .llseek = noop_llseek, }; -const struct file_operations urandom_fops = { - .read = urandom_read, - .write = random_write, - .unlocked_ioctl = random_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .fasync = random_fasync, - .llseek = noop_llseek, -}; - /******************************************************************** * diff --git a/include/linux/random.h b/include/linux/random.h index 6148b8d1ccf3..725a4d08c0a0 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -44,7 +44,7 @@ extern void del_random_ready_callback(struct random_ready_callback *rdy); extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes); #ifndef MODULE -extern const struct file_operations random_fops, urandom_fops; +extern const struct file_operations random_fops; #endif u32 get_random_u32(void); -- cgit v1.2.3-71-gd317 From ae099e8e98fb01395228628be5a4661e3bd86fe4 Mon Sep 17 00:00:00 2001 From: "Jason A. 
Donenfeld" Date: Wed, 23 Feb 2022 13:43:44 +0100 Subject: random: add mechanism for VM forks to reinitialize crng When a VM forks, we must immediately mix in additional information to the stream of random output so that two forks or a rollback don't produce the same stream of random numbers, which could have catastrophic cryptographic consequences. This commit adds a simple API, add_vmfork_ randomness(), for that, by force reseeding the crng. This has the added benefit of also draining the entropy pool and setting its timer back, so that any old entropy that was there prior -- which could have already been used by a different fork, or generally gone stale -- does not contribute to the accounting of the next 256 bits. Cc: Dominik Brodowski Cc: Theodore Ts'o Cc: Jann Horn Cc: Eric Biggers Reviewed-by: Ard Biesheuvel Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 50 +++++++++++++++++++++++++++++++++++--------------- include/linux/random.h | 1 + 2 files changed, 36 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index ede97649c5dd..2b323f6bd96c 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -286,14 +286,14 @@ static DEFINE_PER_CPU(struct crng, crngs) = { }; /* Used by crng_reseed() to extract a new seed from the input pool. */ -static bool drain_entropy(void *buf, size_t nbytes); +static bool drain_entropy(void *buf, size_t nbytes, bool force); /* * This extracts a new crng key from the input pool, but only if there is a - * sufficient amount of entropy available, in order to mitigate bruteforcing - * of newly added bits. + * sufficient amount of entropy available or force is true, in order to + * mitigate bruteforcing of newly added bits. */ -static void crng_reseed(void) +static void crng_reseed(bool force) { unsigned long flags; unsigned long next_gen; @@ -301,7 +301,7 @@ static void crng_reseed(void) bool finalize_init = false; /* Only reseed if we can, to prevent brute forcing a small amount of new bits. */ - if (!drain_entropy(key, sizeof(key))) + if (!drain_entropy(key, sizeof(key), force)) return; /* @@ -398,7 +398,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], * in turn bumps the generation counter that we check below. */ if (unlikely(time_after(jiffies, READ_ONCE(base_crng.birth) + CRNG_RESEED_INTERVAL))) - crng_reseed(); + crng_reseed(false); local_lock_irqsave(&crngs.lock, flags); crng = raw_cpu_ptr(&crngs); @@ -763,10 +763,10 @@ EXPORT_SYMBOL(get_random_bytes_arch); * * Finally, extract entropy via these two, with the latter one * setting the entropy count to zero and extracting only if there - * is POOL_MIN_BITS entropy credited prior: + * is POOL_MIN_BITS entropy credited prior or force is true: * * static void extract_entropy(void *buf, size_t nbytes) - * static bool drain_entropy(void *buf, size_t nbytes) + * static bool drain_entropy(void *buf, size_t nbytes, bool force) * **********************************************************************/ @@ -824,7 +824,7 @@ static void credit_entropy_bits(size_t nbits) } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig); if (crng_init < 2 && entropy_count >= POOL_MIN_BITS) - crng_reseed(); + crng_reseed(false); } /* @@ -874,16 +874,16 @@ static void extract_entropy(void *buf, size_t nbytes) } /* - * First we make sure we have POOL_MIN_BITS of entropy in the pool, and then we - * set the entropy count to zero (but don't actually touch any data). 
Only then - * can we extract a new key with extract_entropy(). + * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force + * is true, and then we set the entropy count to zero (but don't actually touch + * any data). Only then can we extract a new key with extract_entropy(). */ -static bool drain_entropy(void *buf, size_t nbytes) +static bool drain_entropy(void *buf, size_t nbytes, bool force) { unsigned int entropy_count; do { entropy_count = READ_ONCE(input_pool.entropy_count); - if (entropy_count < POOL_MIN_BITS) + if (!force && entropy_count < POOL_MIN_BITS) return false; } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count); extract_entropy(buf, nbytes); @@ -907,6 +907,7 @@ static bool drain_entropy(void *buf, size_t nbytes) * void add_hwgenerator_randomness(const void *buffer, size_t count, * size_t entropy); * void add_bootloader_randomness(const void *buf, size_t size); + * void add_vmfork_randomness(const void *unique_vm_id, size_t size); * void add_interrupt_randomness(int irq); * * add_device_randomness() adds data to the input pool that @@ -938,6 +939,10 @@ static bool drain_entropy(void *buf, size_t nbytes) * add_device_randomness(), depending on whether or not the configuration * option CONFIG_RANDOM_TRUST_BOOTLOADER is set. * + * add_vmfork_randomness() adds a unique (but not necessarily secret) ID + * representing the current instance of a VM to the pool, without crediting, + * and then force-reseeds the crng so that it takes effect immediately. + * * add_interrupt_randomness() uses the interrupt timing as random * inputs to the entropy pool. Using the cycle counters and the irq source * as inputs, it feeds the input pool roughly once a second or after 64 @@ -1163,6 +1168,21 @@ void add_bootloader_randomness(const void *buf, size_t size) } EXPORT_SYMBOL_GPL(add_bootloader_randomness); +/* + * Handle a new unique VM ID, which is unique, not secret, so we + * don't credit it, but we do immediately force a reseed after so + * that it's used by the crng posthaste. + */ +void add_vmfork_randomness(const void *unique_vm_id, size_t size) +{ + add_device_randomness(unique_vm_id, size); + if (crng_ready()) { + crng_reseed(true); + pr_notice("crng reseeded due to virtual machine fork\n"); + } +} +EXPORT_SYMBOL_GPL(add_vmfork_randomness); + struct fast_pool { union { u32 pool32[4]; @@ -1534,7 +1554,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (crng_init < 2) return -ENODATA; - crng_reseed(); + crng_reseed(false); return 0; default: return -EINVAL; diff --git a/include/linux/random.h b/include/linux/random.h index 725a4d08c0a0..117468f3a92e 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -34,6 +34,7 @@ extern void add_input_randomness(unsigned int type, unsigned int code, extern void add_interrupt_randomness(int irq) __latent_entropy; extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); +extern void add_vmfork_randomness(const void *unique_vm_id, size_t size); extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); -- cgit v1.2.3-71-gd317 From d273845ecb0e0626842782a4497f0c5876139ec3 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 25 Feb 2022 16:55:52 +0100 Subject: ACPI: allow longer device IDs We create a list of ACPI "PNP" IDs which contains _HID, _CID, and CLS entries of the respective devices. 
However, when making structs for matching, we squeeze those IDs into acpi_device_id, which only has 9 bytes of space to store the identifier. The subsystem actually captures the full length of the IDs, and the modalias has the full length, but this struct we use for matching is limited. It originally had 16 bytes, but was changed to only have 9 in 6543becf26ff ("mod/file2alias: make modalias generation safe for cross compiling"), presumably on the theory that it would match the ACPI spec so it didn't matter. Unfortunately, while most people adhere to the ACPI specs, Microsoft decided that its VM Generation Counter device [1] should only be identifiable by _CID with a value of "VM_Gen_Counter", which is longer than 9 characters. To allow device drivers to match identifiers that exceed the 9 byte limit, this simply ups the length to 16, just like it was before the aforementioned commit. Empirical testing indicates that this doesn't actually increase vmlinux size on 64-bit, because the ulong in the same struct caused there to be 7 bytes of padding anyway, and when doing a s/M/Y/g i386_defconfig build, the bzImage only increased by 0.0055%, so negligible. This patch is a prerequisite to add support for VMGenID in Linux, the subsequent patch in this series. It has been confirmed to also work on the udev/modalias side in userspace. [1] https://download.microsoft.com/download/3/1/C/31CFC307-98CA-4CA5-914C-D9772691E214/VirtualMachineGenerationID.docx Signed-off-by: Alexander Graf Co-developed-by: Jason A. Donenfeld [Jason: reworked commit message a bit, went with len=16 approach.] Cc: Mika Westerberg Cc: Andy Shevchenko Cc: Len Brown Cc: Greg Kroah-Hartman Reviewed-by: Ard Biesheuvel Acked-by: Hans de Goede Acked-by: Rafael J. Wysocki Signed-off-by: Jason A. Donenfeld --- include/linux/mod_devicetable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 4bb71979a8fd..5da5d990ff58 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -211,7 +211,7 @@ struct css_device_id { kernel_ulong_t driver_data; }; -#define ACPI_ID_LEN 9 +#define ACPI_ID_LEN 16 struct acpi_device_id { __u8 id[ACPI_ID_LEN]; -- cgit v1.2.3-71-gd317 From a4107d34f960df99ca07fa8eb022425a804f59f3 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 1 Mar 2022 15:14:04 +0100 Subject: random: do not export add_vmfork_randomness() unless needed Since add_vmfork_randomness() is only called from vmgenid.o, we can guard it in CONFIG_VMGENID, similarly to how we do with add_disk_randomness() and CONFIG_BLOCK. If we ever have multiple things calling into add_vmfork_randomness(), we can add another shared Kconfig symbol for that, but for now, this is good enough. Even though add_vmfork_randomness() is a pretty small function, removing it means that there are only calls to crng_reseed(false) and none to crng_reseed(true), which means the compiler can constant propagate the false, removing branches from crng_reseed() and its descendants. Additionally, we don't even need the symbol to be exported if CONFIG_VMGENID is not a module, so conditionalize that too. Cc: Dominik Brodowski Cc: Theodore Ts'o Signed-off-by: Jason A.
Donenfeld --- drivers/char/random.c | 4 ++++ include/linux/random.h | 2 ++ 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index 2b323f6bd96c..bc2a4f7e3655 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1168,6 +1168,7 @@ void add_bootloader_randomness(const void *buf, size_t size) } EXPORT_SYMBOL_GPL(add_bootloader_randomness); +#if IS_ENABLED(CONFIG_VMGENID) /* * Handle a new unique VM ID, which is unique, not secret, so we * don't credit it, but we do immediately force a reseed after so @@ -1181,7 +1182,10 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t size) pr_notice("crng reseeded due to virtual machine fork\n"); } } +#if IS_MODULE(CONFIG_VMGENID) EXPORT_SYMBOL_GPL(add_vmfork_randomness); +#endif +#endif struct fast_pool { union { diff --git a/include/linux/random.h b/include/linux/random.h index 117468f3a92e..f209f1a78899 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -34,7 +34,9 @@ extern void add_input_randomness(unsigned int type, unsigned int code, extern void add_interrupt_randomness(int irq) __latent_entropy; extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); +#if IS_ENABLED(CONFIG_VMGENID) extern void add_vmfork_randomness(const void *unique_vm_id, size_t size); +#endif extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); -- cgit v1.2.3-71-gd317 From 5acd35487dc911541672b3ffc322851769c32a56 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 1 Mar 2022 20:03:49 +0100 Subject: random: replace custom notifier chain with standard one We previously rolled our own randomness readiness notifier, which only has two users in the whole kernel. Replace this with a more standard atomic notifier block that serves the same purpose with less code. Also unexport the symbols, because no modules use it, only unconditional builtins. The only drawback is that it's possible for a notification handler returning the "stop" code to prevent further processing, but given that there are only two users, and that we're unexporting this anyway, that doesn't seem like a significant drawback for the simplification we receive here. Cc: Greg Kroah-Hartman Cc: Theodore Ts'o Reviewed-by: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 67 ++++++++++++++------------------------------------ include/linux/random.h | 10 +++----- lib/random32.c | 12 +++++---- lib/vsprintf.c | 10 +++++--- 4 files changed, 35 insertions(+), 64 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index bc2a4f7e3655..aa7bc9a3a864 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -83,8 +83,8 @@ static int crng_init = 0; /* Various types of waiters for crng_init->2 transition. */ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); static struct fasync_struct *fasync; -static DEFINE_SPINLOCK(random_ready_list_lock); -static LIST_HEAD(random_ready_list); +static DEFINE_SPINLOCK(random_ready_chain_lock); +static RAW_NOTIFIER_HEAD(random_ready_chain); /* Control how we warn userspace. 
*/ static struct ratelimit_state unseeded_warning = @@ -144,72 +144,43 @@ EXPORT_SYMBOL(wait_for_random_bytes); * * returns: 0 if callback is successfully added * -EALREADY if pool is already initialised (callback not called) - * -ENOENT if module for callback is not alive */ -int add_random_ready_callback(struct random_ready_callback *rdy) +int register_random_ready_notifier(struct notifier_block *nb) { - struct module *owner; unsigned long flags; - int err = -EALREADY; + int ret = -EALREADY; if (crng_ready()) - return err; - - owner = rdy->owner; - if (!try_module_get(owner)) - return -ENOENT; - - spin_lock_irqsave(&random_ready_list_lock, flags); - if (crng_ready()) - goto out; - - owner = NULL; - - list_add(&rdy->list, &random_ready_list); - err = 0; - -out: - spin_unlock_irqrestore(&random_ready_list_lock, flags); - - module_put(owner); + return ret; - return err; + spin_lock_irqsave(&random_ready_chain_lock, flags); + if (!crng_ready()) + ret = raw_notifier_chain_register(&random_ready_chain, nb); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); + return ret; } -EXPORT_SYMBOL(add_random_ready_callback); /* * Delete a previously registered readiness callback function. */ -void del_random_ready_callback(struct random_ready_callback *rdy) +int unregister_random_ready_notifier(struct notifier_block *nb) { unsigned long flags; - struct module *owner = NULL; - - spin_lock_irqsave(&random_ready_list_lock, flags); - if (!list_empty(&rdy->list)) { - list_del_init(&rdy->list); - owner = rdy->owner; - } - spin_unlock_irqrestore(&random_ready_list_lock, flags); + int ret; - module_put(owner); + spin_lock_irqsave(&random_ready_chain_lock, flags); + ret = raw_notifier_chain_unregister(&random_ready_chain, nb); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); + return ret; } -EXPORT_SYMBOL(del_random_ready_callback); static void process_random_ready_list(void) { unsigned long flags; - struct random_ready_callback *rdy, *tmp; - spin_lock_irqsave(&random_ready_list_lock, flags); - list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) { - struct module *owner = rdy->owner; - - list_del_init(&rdy->list); - rdy->func(rdy); - module_put(owner); - } - spin_unlock_irqrestore(&random_ready_list_lock, flags); + spin_lock_irqsave(&random_ready_chain_lock, flags); + raw_notifier_call_chain(&random_ready_chain, 0, NULL); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); } #define warn_unseeded_randomness(previous) \ diff --git a/include/linux/random.h b/include/linux/random.h index f209f1a78899..fab1ab0563b4 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -10,11 +10,7 @@ #include -struct random_ready_callback { - struct list_head list; - void (*func)(struct random_ready_callback *rdy); - struct module *owner; -}; +struct notifier_block; extern void add_device_randomness(const void *, size_t); extern void add_bootloader_randomness(const void *, size_t); @@ -42,8 +38,8 @@ extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); extern int __init rand_initialize(void); extern bool rng_is_initialized(void); -extern int add_random_ready_callback(struct random_ready_callback *rdy); -extern void del_random_ready_callback(struct random_ready_callback *rdy); +extern int register_random_ready_notifier(struct notifier_block *nb); +extern int unregister_random_ready_notifier(struct notifier_block *nb); extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes); #ifndef MODULE diff --git a/lib/random32.c 
b/lib/random32.c index 3c19820796d0..976632003ec6 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -551,9 +551,11 @@ static void prandom_reseed(struct timer_list *unused) * To avoid worrying about whether it's safe to delay that interrupt * long enough to seed all CPUs, just schedule an immediate timer event. */ -static void prandom_timer_start(struct random_ready_callback *unused) +static int prandom_timer_start(struct notifier_block *nb, + unsigned long action, void *data) { mod_timer(&seed_timer, jiffies); + return 0; } #ifdef CONFIG_RANDOM32_SELFTEST @@ -617,13 +619,13 @@ core_initcall(prandom32_state_selftest); */ static int __init prandom_init_late(void) { - static struct random_ready_callback random_ready = { - .func = prandom_timer_start + static struct notifier_block random_ready = { + .notifier_call = prandom_timer_start }; - int ret = add_random_ready_callback(&random_ready); + int ret = register_random_ready_notifier(&random_ready); if (ret == -EALREADY) { - prandom_timer_start(&random_ready); + prandom_timer_start(&random_ready, 0, NULL); ret = 0; } return ret; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 3b8129dd374c..36574a806a81 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -757,14 +757,16 @@ static void enable_ptr_key_workfn(struct work_struct *work) static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); -static void fill_random_ptr_key(struct random_ready_callback *unused) +static int fill_random_ptr_key(struct notifier_block *nb, + unsigned long action, void *data) { /* This may be in an interrupt handler. */ queue_work(system_unbound_wq, &enable_ptr_key_work); + return 0; } -static struct random_ready_callback random_ready = { - .func = fill_random_ptr_key +static struct notifier_block random_ready = { + .notifier_call = fill_random_ptr_key }; static int __init initialize_ptr_random(void) @@ -778,7 +780,7 @@ static int __init initialize_ptr_random(void) return 0; } - ret = add_random_ready_callback(&random_ready); + ret = register_random_ready_notifier(&random_ready); if (!ret) { return 0; } else if (ret == -EALREADY) { -- cgit v1.2.3-71-gd317 From f3c2682bad7bc6033c837e9c66e5af881fe8d465 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 1 Mar 2022 20:22:39 +0100 Subject: random: provide notifier for VM fork Drivers such as WireGuard need to learn when VMs fork in order to clear sessions. This commit provides a simple notifier_block for that, with a register and unregister function. When no VM fork detection is compiled in, this turns into a no-op, similar to how the power notifier works. Cc: Dominik Brodowski Cc: Theodore Ts'o Reviewed-by: Greg Kroah-Hartman Signed-off-by: Jason A. 
Donenfeld --- drivers/char/random.c | 15 +++++++++++++++ include/linux/random.h | 5 +++++ 2 files changed, 20 insertions(+) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index aa7bc9a3a864..d9ca441c14bd 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1140,6 +1140,8 @@ void add_bootloader_randomness(const void *buf, size_t size) EXPORT_SYMBOL_GPL(add_bootloader_randomness); #if IS_ENABLED(CONFIG_VMGENID) +static BLOCKING_NOTIFIER_HEAD(vmfork_chain); + /* * Handle a new unique VM ID, which is unique, not secret, so we * don't credit it, but we do immediately force a reseed after so @@ -1152,10 +1154,23 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t size) crng_reseed(true); pr_notice("crng reseeded due to virtual machine fork\n"); } + blocking_notifier_call_chain(&vmfork_chain, 0, NULL); } #if IS_MODULE(CONFIG_VMGENID) EXPORT_SYMBOL_GPL(add_vmfork_randomness); #endif + +int register_random_vmfork_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vmfork_chain, nb); +} +EXPORT_SYMBOL_GPL(register_random_vmfork_notifier); + +int unregister_random_vmfork_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&vmfork_chain, nb); +} +EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier); #endif struct fast_pool { diff --git a/include/linux/random.h b/include/linux/random.h index fab1ab0563b4..c0baffe7afb1 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -32,6 +32,11 @@ extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); #if IS_ENABLED(CONFIG_VMGENID) extern void add_vmfork_randomness(const void *unique_vm_id, size_t size); +extern int register_random_vmfork_notifier(struct notifier_block *nb); +extern int unregister_random_vmfork_notifier(struct notifier_block *nb); +#else +static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; } +static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; } #endif extern void get_random_bytes(void *buf, size_t nbytes); -- cgit v1.2.3-71-gd317 From df227dc8a68d81b7fe3416eca16d1f21d0b537f0 Mon Sep 17 00:00:00 2001 From: Dave Gerlach Date: Wed, 9 Feb 2022 22:16:31 -0600 Subject: mailbox: ti-msgmgr: Operate mailbox in polled mode during system suspend During the system suspend path we must set all queues to operate in polled mode as it is possible for any protocol built using this mailbox, such as TISCI, to require communication during the no irq phase of suspend, and we cannot rely on interrupts there. Polled mode is implemented by allowing the mailbox user to define an RX channel as part of the message that is sent which is what gets polled for a response. If polled mode is enabled, this will immediately be polled for a response at the end of the mailbox send_data op before returning success for the data send or timing out if no response is received. Finally, to ensure polled mode is always enabled during system suspend, iterate through all queues to set RX queues to polled mode during system suspend and disable polled mode for all in the resume handler. 
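A hedged sketch of how a mailbox user might opt into polled RX (the client function, channel variables, and 100 ms budget are hypothetical; only the chan_rx and timeout_rx_ms fields come from this patch):

#include <linux/mailbox_client.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/types.h>

static int example_send_polled(struct mbox_chan *tx_chan,
                               struct mbox_chan *rx_chan,
                               u8 *buf, size_t len)
{
        struct ti_msgmgr_message msg = {
                .len = len,
                .buf = buf,
                .chan_rx = rx_chan,     /* poll this queue for the reply */
                .timeout_rx_ms = 100,   /* hypothetical response budget */
        };

        /*
         * With the RX queue switched to polled mode (as done for the
         * noirq suspend phase), send_data() spins on the RX queue's
         * state register until a reply arrives or the timeout expires.
         */
        return mbox_send_message(tx_chan, &msg);
}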
Signed-off-by: Dave Gerlach Signed-off-by: Jassi Brar --- drivers/mailbox/ti-msgmgr.c | 93 +++++++++++++++++++++++++++++++++++++++- include/linux/soc/ti/ti-msgmgr.h | 8 +++- 2 files changed, 98 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c index f860cd0c907a..ddac423ac1a9 100644 --- a/drivers/mailbox/ti-msgmgr.c +++ b/drivers/mailbox/ti-msgmgr.c @@ -2,7 +2,7 @@ /* * Texas Instruments' Message Manager Driver * - * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon */ @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -100,6 +101,7 @@ struct ti_msgmgr_desc { * @queue_ctrl: Queue Control register * @chan: Mailbox channel * @rx_buff: Receive buffer pointer allocated at probe, max_message_size + * @polled_rx_mode: Use polling for rx instead of interrupts */ struct ti_queue_inst { char name[30]; @@ -113,6 +115,7 @@ struct ti_queue_inst { void __iomem *queue_ctrl; struct mbox_chan *chan; u32 *rx_buff; + bool polled_rx_mode; }; /** @@ -237,6 +240,26 @@ static int ti_msgmgr_queue_rx_data(struct mbox_chan *chan, struct ti_queue_inst return 0; } +static int ti_msgmgr_queue_rx_poll_timeout(struct mbox_chan *chan, int timeout_us) +{ + struct device *dev = chan->mbox->dev; + struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); + struct ti_queue_inst *qinst = chan->con_priv; + const struct ti_msgmgr_desc *desc = inst->desc; + int msg_count; + int ret; + + ret = readl_poll_timeout_atomic(qinst->queue_state, msg_count, + (msg_count & desc->status_cnt_mask), + 10, timeout_us); + if (ret != 0) + return ret; + + ti_msgmgr_queue_rx_data(chan, qinst, desc); + + return 0; +} + /** * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue * @irq: Interrupt number @@ -346,6 +369,17 @@ static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan) return msg_count ? 
false : true; } +static bool ti_msgmgr_chan_has_polled_queue_rx(struct mbox_chan *chan) +{ + struct ti_queue_inst *qinst; + + if (!chan) + return false; + + qinst = chan->con_priv; + return qinst->polled_rx_mode; +} + /** * ti_msgmgr_send_data() - Send data * @chan: Channel Pointer @@ -363,6 +397,7 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data) struct ti_msgmgr_message *message = data; void __iomem *data_reg; u32 *word_data; + int ret = 0; if (WARN_ON(!inst)) { dev_err(dev, "no platform drv data??\n"); @@ -404,7 +439,12 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data) if (data_reg <= qinst->queue_buff_end) writel(0, qinst->queue_buff_end); - return 0; + /* If we are in polled mode, wait for a response before proceeding */ + if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx)) + ret = ti_msgmgr_queue_rx_poll_timeout(message->chan_rx, + message->timeout_rx_ms * 1000); + + return ret; } /** @@ -652,6 +692,54 @@ static int ti_msgmgr_queue_setup(int idx, struct device *dev, return 0; } +static int ti_msgmgr_queue_rx_set_polled_mode(struct ti_queue_inst *qinst, bool enable) +{ + if (enable) { + disable_irq(qinst->irq); + qinst->polled_rx_mode = true; + } else { + enable_irq(qinst->irq); + qinst->polled_rx_mode = false; + } + + return 0; +} + +static int ti_msgmgr_suspend(struct device *dev) +{ + struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); + struct ti_queue_inst *qinst; + int i; + + /* + * We must switch operation to polled mode now as drivers and the genpd + * layer may make late TI SCI calls to change clock and device states + * from the noirq phase of suspend. + */ + for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) { + if (!qinst->is_tx) + ti_msgmgr_queue_rx_set_polled_mode(qinst, true); + } + + return 0; +} + +static int ti_msgmgr_resume(struct device *dev) +{ + struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); + struct ti_queue_inst *qinst; + int i; + + for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) { + if (!qinst->is_tx) + ti_msgmgr_queue_rx_set_polled_mode(qinst, false); + } + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(ti_msgmgr_pm_ops, ti_msgmgr_suspend, ti_msgmgr_resume); + /* Queue operations */ static const struct mbox_chan_ops ti_msgmgr_chan_ops = { .startup = ti_msgmgr_queue_startup, @@ -839,6 +927,7 @@ static struct platform_driver ti_msgmgr_driver = { .driver = { .name = "ti-msgmgr", .of_match_table = of_match_ptr(ti_msgmgr_of_match), + .pm = &ti_msgmgr_pm_ops, }, }; module_platform_driver(ti_msgmgr_driver); diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h index 1f6e76d423cf..69a8d7682c4b 100644 --- a/include/linux/soc/ti/ti-msgmgr.h +++ b/include/linux/soc/ti/ti-msgmgr.h @@ -1,7 +1,7 @@ /* * Texas Instruments' Message Manager * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon * * This program is free software; you can redistribute it and/or modify @@ -17,10 +17,14 @@ #ifndef TI_MSGMGR_H #define TI_MSGMGR_H +struct mbox_chan; + /** * struct ti_msgmgr_message - Message Manager structure * @len: Length of data in the Buffer * @buf: Buffer pointer + * @chan_rx: Expected channel for response, must be provided to use polled rx + * @timeout_rx_ms: Timeout value to use if polling for response * * This is the structure for data used in mbox_send_message * the length of data buffer used depends on the SoC 
integration @@ -30,6 +34,8 @@ struct ti_msgmgr_message { size_t len; u8 *buf; + struct mbox_chan *chan_rx; + int timeout_rx_ms; }; #endif /* TI_MSGMGR_H */ -- cgit v1.2.3-71-gd317 From a41b05edfedb939440e83666f23de3ef9af33acf Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: SUNRPC/auth: async tasks mustn't block waiting for memory When memory is short, new worker threads cannot be created and we depend on the minimum one rpciod thread to be able to handle everything. So it must not block waiting for memory. mempools are particularly a problem as memory can only be released back to the mempool by an async rpc task running. If all available workqueue threads are waiting on the mempool, no thread is available to return anything. lookup_cred() can block on a mempool or kmalloc - and this can cause deadlocks. So add a new RPCAUTH_LOOKUP flag for async lookups and don't block on memory. If the -ENOMEM gets back to call_refreshresult(), wait a short while and try again. HZ>>4 is chosen as it is used elsewhere for -ENOMEM retries. Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- include/linux/sunrpc/auth.h | 1 + net/sunrpc/auth.c | 6 +++++- net/sunrpc/auth_gss/auth_gss.c | 6 +++++- net/sunrpc/auth_unix.c | 10 ++++++++-- net/sunrpc/clnt.c | 3 +++ 5 files changed, 22 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 98da816b5fc2..3e6ce288a7fc 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -99,6 +99,7 @@ struct rpc_auth_create_args { /* Flags for rpcauth_lookupcred() */ #define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */ +#define RPCAUTH_LOOKUP_ASYNC 0x02 /* Don't block waiting for memory */ /* * Client authentication ops diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index a9f0d17fdb0d..6bfa19f9fa6a 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -615,6 +615,8 @@ rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags) }; struct rpc_cred *ret; + if (RPC_IS_ASYNC(task)) + lookupflags |= RPCAUTH_LOOKUP_ASYNC; ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags); put_cred(acred.cred); return ret; @@ -631,6 +633,8 @@ rpcauth_bind_machine_cred(struct rpc_task *task, int lookupflags) if (!acred.principal) return NULL; + if (RPC_IS_ASYNC(task)) + lookupflags |= RPCAUTH_LOOKUP_ASYNC; return auth->au_ops->lookup_cred(auth, &acred, lookupflags); } @@ -654,7 +658,7 @@ rpcauth_bindcred(struct rpc_task *task, const struct cred *cred, int flags) }; if (flags & RPC_TASK_ASYNC) - lookupflags |= RPCAUTH_LOOKUP_NEW; + lookupflags |= RPCAUTH_LOOKUP_NEW | RPCAUTH_LOOKUP_ASYNC; if (task->tk_op_cred) /* Task must use exactly this rpc_cred */ new = get_rpccred(task->tk_op_cred); diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index affd64a54f02..ac0828108204 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1341,7 +1341,11 @@ gss_hash_cred(struct auth_cred *acred, unsigned int hashbits) static struct rpc_cred * gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) { - return rpcauth_lookup_credcache(auth, acred, flags, GFP_KERNEL); + gfp_t gfp = GFP_KERNEL; + + if (flags & RPCAUTH_LOOKUP_ASYNC) + gfp = GFP_NOWAIT | __GFP_NOWARN; + return rpcauth_lookup_credcache(auth, acred, flags, gfp); } static struct rpc_cred * diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 3600d8641644..c629d366030e 100644 --- 
a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c @@ -43,8 +43,14 @@ unx_destroy(struct rpc_auth *auth) static struct rpc_cred * unx_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) { - struct rpc_cred *ret = mempool_alloc(unix_pool, GFP_KERNEL); - + gfp_t gfp = GFP_KERNEL; + struct rpc_cred *ret; + + if (flags & RPCAUTH_LOOKUP_ASYNC) + gfp = GFP_NOWAIT | __GFP_NOWARN; + ret = mempool_alloc(unix_pool, gfp); + if (!ret) + return ERR_PTR(-ENOMEM); rpcauth_init_cred(ret, acred, auth, &unix_credops); ret->cr_flags = 1UL << RPCAUTH_CRED_UPTODATE; return ret; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 97165a545cb3..9556eb7b065b 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1745,6 +1745,9 @@ call_refreshresult(struct rpc_task *task) task->tk_cred_retry--; trace_rpc_retry_refresh_status(task); return; + case -ENOMEM: + rpc_delay(task, HZ >> 4); + return; } trace_rpc_refresh_status(task); rpc_call_rpcerror(task, status); -- cgit v1.2.3-71-gd317 From 89c2be8a951654758dffeaaa6272328d9c8f29be Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: NFS: discard NFS_RPC_SWAPFLAGS and RPC_TASK_ROOTCREDS NFS_RPC_SWAPFLAGS is only used for READ requests. It sets RPC_TASK_SWAPPER which gives some memory-allocation priority to requests. This is not needed for swap READ - though it is for writes where it is set via a different mechanism. RPC_TASK_ROOTCREDS causes the 'machine' credential to be used. This is not needed as the root credential is saved when the swap file is opened, and this is used for all IO. So NFS_RPC_SWAPFLAGS isn't needed, and as it is the only user of RPC_TASK_ROOTCREDS, that isn't needed either. Remove both. Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/read.c | 4 ---- include/linux/nfs_fs.h | 5 ----- include/linux/sunrpc/sched.h | 1 - include/trace/events/sunrpc.h | 1 - net/sunrpc/auth.c | 2 +- 5 files changed, 1 insertion(+), 12 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/read.c b/fs/nfs/read.c index e4c1a49b0126..5e7657374bc3 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -194,10 +194,6 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr, const struct nfs_rpc_ops *rpc_ops, struct rpc_task_setup *task_setup_data, int how) { - struct inode *inode = hdr->inode; - int swap_flags = IS_SWAPFILE(inode) ? 
NFS_RPC_SWAPFLAGS : 0; - - task_setup_data->flags |= swap_flags; rpc_ops->read_setup(hdr, msg); trace_nfs_initiate_read(hdr); } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 3893386ceaed..9074ed0b65aa 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -45,11 +45,6 @@ */ #define NFS_MAX_TRANSPORTS 16 -/* - * These are the default flags for swap requests - */ -#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) - /* * Size of the NFS directory verifier */ diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index db964bb63912..56710f8056d3 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -124,7 +124,6 @@ struct rpc_task_setup { #define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */ #define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ -#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ #define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */ #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 29982d60b68a..ac33892da411 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -311,7 +311,6 @@ TRACE_EVENT(rpc_request, { RPC_TASK_MOVEABLE, "MOVEABLE" }, \ { RPC_TASK_NULLCREDS, "NULLCREDS" }, \ { RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \ - { RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \ { RPC_TASK_DYNAMIC, "DYNAMIC" }, \ { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \ { RPC_TASK_SOFT, "SOFT" }, \ diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 6bfa19f9fa6a..682fcd24bf43 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -670,7 +670,7 @@ rpcauth_bindcred(struct rpc_task *task, const struct cred *cred, int flags) /* If machine cred couldn't be bound, try a root cred */ if (new) ; - else if (cred == &machine_cred || (flags & RPC_TASK_ROOTCREDS)) + else if (cred == &machine_cred) new = rpcauth_bind_root_cred(task, lookupflags); else if (flags & RPC_TASK_NULLCREDS) new = authnull_ops.lookup_cred(NULL, NULL, 0); -- cgit v1.2.3-71-gd317 From 4dc73c679114a2f408567e2e44770ed934190db2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: NFSv4: keep state manager thread active if swap is enabled If we are swapping over NFSv4, we may not be able to allocate memory to start the state-manager thread at the time when we need it. So keep it always running when swap is enabled, and just signal it to start. This requires updating and testing the cl_swapper count on the root rpc_clnt after following all ->cl_parent links. 
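The ->cl_parent walk the message refers to is open-coded at each site in the patch; pulled out in isolation it is just the following (the helper name is hypothetical, the patch does not introduce one):

#include <linux/sunrpc/clnt.h>

/*
 * Illustrative helper only -- the patch open-codes this loop.
 * cl_swapper is counted and tested on the root rpc_clnt, so every
 * site must first follow ->cl_parent links to the top.
 */
static struct rpc_clnt *example_root_rpc_clnt(struct rpc_clnt *clnt)
{
        while (clnt != clnt->cl_parent)
                clnt = clnt->cl_parent;
        return clnt;
}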
Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/file.c | 15 ++++++++++++--- fs/nfs/nfs4_fs.h | 1 + fs/nfs/nfs4proc.c | 20 ++++++++++++++++++++ fs/nfs/nfs4state.c | 40 +++++++++++++++++++++++++++++++++------- include/linux/nfs_xdr.h | 2 ++ net/sunrpc/clnt.c | 2 ++ 6 files changed, 70 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 93c01aaa0a8d..d31bc430dce3 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -483,8 +483,9 @@ static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, { unsigned long blocks; long long isize; - struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); - struct inode *inode = file->f_mapping->host; + struct inode *inode = file_inode(file); + struct rpc_clnt *clnt = NFS_CLIENT(inode); + struct nfs_client *cl = NFS_SERVER(inode)->nfs_client; spin_lock(&inode->i_lock); blocks = inode->i_blocks; @@ -497,14 +498,22 @@ static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, *span = sis->pages; + + if (cl->rpc_ops->enable_swap) + cl->rpc_ops->enable_swap(inode); + return rpc_clnt_swap_activate(clnt); } static void nfs_swap_deactivate(struct file *file) { - struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); + struct inode *inode = file_inode(file); + struct rpc_clnt *clnt = NFS_CLIENT(inode); + struct nfs_client *cl = NFS_SERVER(inode)->nfs_client; rpc_clnt_swap_deactivate(clnt); + if (cl->rpc_ops->disable_swap) + cl->rpc_ops->disable_swap(file_inode(file)); } const struct address_space_operations nfs_file_aops = { diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 84f39b6f1b1e..79df6e83881b 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -42,6 +42,7 @@ enum nfs4_client_state { NFS4CLNT_LEASE_MOVED, NFS4CLNT_DELEGATION_EXPIRED, NFS4CLNT_RUN_MANAGER, + NFS4CLNT_MANAGER_AVAILABLE, NFS4CLNT_RECALL_RUNNING, NFS4CLNT_RECALL_ANY_LAYOUT_READ, NFS4CLNT_RECALL_ANY_LAYOUT_RW, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index fd8eece12e94..dd7a4c2a3f05 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -10468,6 +10468,24 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) return error + error2 + error3; } +static void nfs4_enable_swap(struct inode *inode) +{ + /* The state manager thread must always be running. + * It will notice the client is a swapper, and stay put. + */ + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + + nfs4_schedule_state_manager(clp); +} + +static void nfs4_disable_swap(struct inode *inode) +{ + /* The state manager thread will now exit once it is + * woken. 
+ */ + wake_up_var(&NFS_SERVER(inode)->nfs_client->cl_state); +} + static const struct inode_operations nfs4_dir_inode_operations = { .create = nfs_create, .lookup = nfs_lookup, @@ -10545,6 +10563,8 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .create_server = nfs4_create_server, .clone_server = nfs_clone_server, .discover_trunking = nfs4_discover_trunking, + .enable_swap = nfs4_enable_swap, + .disable_swap = nfs4_disable_swap, }; static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 58054dfdf2b0..4b2ea239a537 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1207,10 +1207,17 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) { struct task_struct *task; char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1]; + struct rpc_clnt *cl = clp->cl_rpcclient; + + while (cl != cl->cl_parent) + cl = cl->cl_parent; set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); - if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) + if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) { + wake_up_var(&clp->cl_state); return; + } + set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); __module_get(THIS_MODULE); refcount_inc(&clp->cl_count); @@ -1226,6 +1233,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) printk(KERN_ERR "%s: kthread_run: %ld\n", __func__, PTR_ERR(task)); nfs4_clear_state_manager_bit(clp); + clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); nfs_put_client(clp); module_put(THIS_MODULE); } @@ -2680,12 +2688,8 @@ static void nfs4_state_manager(struct nfs_client *clp) clear_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state); } - /* Did we race with an attempt to give us more work? */ - if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state)) - return; - if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) - return; - memflags = memalloc_nofs_save(); + return; + } while (refcount_read(&clp->cl_count) > 1 && !signalled()); goto out_drain; @@ -2706,9 +2710,31 @@ out_drain: static int nfs4_run_state_manager(void *ptr) { struct nfs_client *clp = ptr; + struct rpc_clnt *cl = clp->cl_rpcclient; + + while (cl != cl->cl_parent) + cl = cl->cl_parent; allow_signal(SIGKILL); +again: + set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); nfs4_state_manager(clp); + if (atomic_read(&cl->cl_swapper)) { + wait_var_event_interruptible(&clp->cl_state, + test_bit(NFS4CLNT_RUN_MANAGER, + &clp->cl_state)); + if (atomic_read(&cl->cl_swapper) && + test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state)) + goto again; + /* Either no longer a swapper, or were signalled */ + } + clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); + + if (refcount_read(&clp->cl_count) > 1 && !signalled() && + test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) && + !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state)) + goto again; + nfs_put_client(clp); module_put_and_kthread_exit(0); return 0; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 82f7c2730b9a..49ba486aea5f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1797,6 +1797,8 @@ struct nfs_rpc_ops { struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); int (*discover_trunking)(struct nfs_server *, struct nfs_fh *); + void (*enable_swap)(struct inode *inode); + void (*disable_swap)(struct inode *inode); }; /* diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 4117ea4caa2e..0f54a56d19d2 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ 
-3069,6 +3069,8 @@ rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt, int rpc_clnt_swap_activate(struct rpc_clnt *clnt) { + while (clnt != clnt->cl_parent) + clnt = clnt->cl_parent; if (atomic_inc_return(&clnt->cl_swapper) == 1) return rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_swap_activate_callback, NULL); -- cgit v1.2.3-71-gd317 From 64158668ac8b31626a8ce48db4cad08496eb8340 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 7 Mar 2022 10:41:44 +1100 Subject: NFS: swap IO handling is slightly different for O_DIRECT IO 1/ Taking the i_rwsem for swap IO triggers lockdep warnings regarding possible deadlocks with "fs_reclaim". These deadlocks could, I believe, eventuate if a buffered read on the swapfile was attempted. We don't need coherence with the page cache for a swap file, and buffered writes are forbidden anyway. There is no other need for i_rwsem during direct IO. So never take it for swap_rw() 2/ generic_write_checks() explicitly forbids writes to swap, and performs checks that are not needed for swap. So bypass it for swap_rw(). Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 42 ++++++++++++++++++++++++++++-------------- fs/nfs/file.c | 4 ++-- include/linux/nfs_fs.h | 8 ++++---- 3 files changed, 34 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index eabfdab543c8..04aaf39a05cb 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -173,8 +173,8 @@ ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); if (iov_iter_rw(iter) == READ) - return nfs_file_direct_read(iocb, iter); - return nfs_file_direct_write(iocb, iter); + return nfs_file_direct_read(iocb, iter, true); + return nfs_file_direct_write(iocb, iter, true); } static void nfs_direct_release_pages(struct page **pages, unsigned int npages) @@ -425,6 +425,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_read - file direct read operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers into which to read data + * @swap: flag indicating this is swap IO, not O_DIRECT IO * * We use this function for direct reads instead of calling * generic_file_aio_read() in order to avoid gfar's check to see if @@ -440,7 +441,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * client must read the updated atime from the server back into its * cache. 
*/ -ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) +ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, + bool swap) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -482,12 +484,14 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) if (iter_is_iovec(iter)) dreq->flags = NFS_ODIRECT_SHOULD_DIRTY; - nfs_start_io_direct(inode); + if (!swap) + nfs_start_io_direct(inode); NFS_I(inode)->read_io += count; requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos); - nfs_end_io_direct(inode); + if (!swap) + nfs_end_io_direct(inode); if (requested > 0) { result = nfs_direct_wait(dreq); @@ -876,6 +880,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_write - file direct write operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers from which to write data + * @swap: flag indicating this is swap IO, not O_DIRECT IO * * We use this function for direct writes instead of calling * generic_file_aio_write() in order to avoid taking the inode @@ -892,7 +897,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * Note that O_APPEND is not supported for NFS direct writes, as there * is no atomic O_APPEND write facility in the NFS protocol. */ -ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) +ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, + bool swap) { ssize_t result, requested; size_t count; @@ -906,7 +912,11 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n", file, iov_iter_count(iter), (long long) iocb->ki_pos); - result = generic_write_checks(iocb, iter); + if (swap) + /* bypass generic checks */ + result = iov_iter_count(iter); + else + result = generic_write_checks(iocb, iter); if (result <= 0) return result; count = result; @@ -937,16 +947,20 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) dreq->iocb = iocb; pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode); - nfs_start_io_direct(inode); + if (swap) { + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); + } else { + nfs_start_io_direct(inode); - requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); - if (mapping->nrpages) { - invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, end); - } + if (mapping->nrpages) { + invalidate_inode_pages2_range(mapping, + pos >> PAGE_SHIFT, end); + } - nfs_end_io_direct(inode); + nfs_end_io_direct(inode); + } if (requested > 0) { result = nfs_direct_wait(dreq); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index d31bc430dce3..81c80548a5c6 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -157,7 +157,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) ssize_t result; if (iocb->ki_flags & IOCB_DIRECT) - return nfs_file_direct_read(iocb, to); + return nfs_file_direct_read(iocb, to, false); dprintk("NFS: read(%pD2, %zu@%lu)\n", iocb->ki_filp, @@ -623,7 +623,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) return result; if (iocb->ki_flags & IOCB_DIRECT) - return nfs_file_direct_write(iocb, from); + return nfs_file_direct_write(iocb, from, false); dprintk("NFS: write(%pD2, %zu@%Ld)\n", file, iov_iter_count(from), (long long) iocb->ki_pos); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 
9074ed0b65aa..c47c448befc8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -508,10 +508,10 @@ static inline const struct cred *nfs_file_cred(struct file *file) * linux/fs/nfs/direct.c */ extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *); -extern ssize_t nfs_file_direct_read(struct kiocb *iocb, - struct iov_iter *iter); -extern ssize_t nfs_file_direct_write(struct kiocb *iocb, - struct iov_iter *iter); +ssize_t nfs_file_direct_read(struct kiocb *iocb, + struct iov_iter *iter, bool swap); +ssize_t nfs_file_direct_write(struct kiocb *iocb, + struct iov_iter *iter, bool swap); /* * linux/fs/nfs/dir.c -- cgit v1.2.3-71-gd317 From 1f4a5983d623d6dbda4cc7587a2d9d798e0d4035 Mon Sep 17 00:00:00 2001 From: Ziyang Xuan Date: Fri, 11 Mar 2022 17:04:03 +0800 Subject: net: macvlan: add net device refcount tracker Add net device refcount tracker to macvlan. Signed-off-by: Ziyang Xuan Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 4 ++-- include/linux/if_macvlan.h | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b339581e36aa..069e8824c264 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -912,7 +912,7 @@ static int macvlan_init(struct net_device *dev) port->count += 1; /* Get macvlan's reference to lowerdev */ - dev_hold(lowerdev); + dev_hold_track(lowerdev, &vlan->dev_tracker, GFP_KERNEL); return 0; } @@ -1181,7 +1181,7 @@ static void macvlan_dev_free(struct net_device *dev) struct macvlan_dev *vlan = netdev_priv(dev); /* Get rid of the macvlan's reference to lowerdev */ - dev_put(vlan->lowerdev); + dev_put_track(vlan->lowerdev, &vlan->dev_tracker); } void macvlan_common_setup(struct net_device *dev) diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 10c94a3936ca..b42294739063 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -21,6 +21,7 @@ struct macvlan_dev { struct hlist_node hlist; struct macvlan_port *port; struct net_device *lowerdev; + netdevice_tracker dev_tracker; void *accel_priv; struct vlan_pcpu_stats __percpu *pcpu_stats; -- cgit v1.2.3-71-gd317 From fbd9a2ceba5c74bbfa19cf257ae4b4b2c820860d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 11 Mar 2022 16:03:42 +0100 Subject: net: Add lockdep asserts to ____napi_schedule(). ____napi_schedule() needs to be invoked with disabled interrupts due to __raise_softirq_irqoff (in order not to corrupt the per-CPU list). ____napi_schedule() also needs to be invoked from an interrupt context so that the raised softirq is processed while the interrupt context is left. Add lockdep asserts for both conditions. While this is the second time the irq/softirq check is needed, provide a generic lockdep_assert_softirq_will_run() which is used by both callers. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. Miller --- include/linux/lockdep.h | 7 +++++++ net/core/dev.c | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 467b94257105..0cc65d216701 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -329,6 +329,12 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #define lockdep_assert_none_held_once() \ lockdep_assert_once(!current->lockdep_depth) +/* + * Ensure that softirq is handled within the callchain and not delayed and + * handled by chance.
+ */ +#define lockdep_assert_softirq_will_run() \ + lockdep_assert_once(hardirq_count() | softirq_count()) #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) @@ -414,6 +420,7 @@ extern int lockdep_is_held(const void *); #define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) #define lockdep_assert_none_held_once() do { } while (0) +#define lockdep_assert_softirq_will_run() do { } while (0) #define lockdep_recursing(tsk) (0) diff --git a/net/core/dev.c b/net/core/dev.c index 8d25ec5b3af7..75bab5b0dbae 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4265,6 +4265,9 @@ static inline void ____napi_schedule(struct softnet_data *sd, { struct task_struct *thread; + lockdep_assert_softirq_will_run(); + lockdep_assert_irqs_disabled(); + if (test_bit(NAPI_STATE_THREADED, &napi->state)) { /* Paired with smp_mb__before_atomic() in * napi_enable()/dev_set_threaded(). @@ -4872,7 +4875,7 @@ int __netif_rx(struct sk_buff *skb) { int ret; - lockdep_assert_once(hardirq_count() | softirq_count()); + lockdep_assert_softirq_will_run(); trace_netif_rx_entry(skb); ret = netif_rx_internal(skb); -- cgit v1.2.3-71-gd317 From 871129332d74c9e94bd110932ac4445833995639 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Wed, 4 Sep 2019 12:13:25 -0700 Subject: fs: export rw_verify_area() I'm adding btrfs ioctls to read and write compressed data, and rather than duplicating the checks in rw_verify_area(), let's just export it. Reviewed-by: Josef Bacik Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/internal.h | 5 ----- fs/read_write.c | 1 + include/linux/fs.h | 1 + 3 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/internal.h b/fs/internal.h index 8590c973c2f4..711bdc00ec7c 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -157,11 +157,6 @@ extern char *simple_dname(struct dentry *, char *, int); extern void dput_to_list(struct dentry *, struct list_head *); extern void shrink_dentry_list(struct list_head *); -/* - * read_write.c - */ -extern int rw_verify_area(int, struct file *, const loff_t *, size_t); - /* * pipe.c */ diff --git a/fs/read_write.c b/fs/read_write.c index 0074afa7ecb3..4d60146243df 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -385,6 +385,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t return security_file_permission(file, read_write == READ ? 
MAY_READ : MAY_WRITE); } +EXPORT_SYMBOL(rw_verify_area); static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) { diff --git a/include/linux/fs.h b/include/linux/fs.h index e2d892b201b0..0ebfc2519212 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3173,6 +3173,7 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size); extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); extern loff_t no_seek_end_llseek(struct file *, loff_t, int); +int rw_verify_area(int, struct file *, const loff_t *, size_t); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); extern int stream_open(struct inode * inode, struct file * filp); -- cgit v1.2.3-71-gd317 From f6f7a25a650818c61defa97d60a79b216618315f Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Thu, 12 Aug 2021 15:34:57 -0700 Subject: fs: export variant of generic_write_checks without iov_iter Encoded I/O in Btrfs needs to check a write with a given logical size without an iov_iter that matches that size (because the iov_iter we have is for the compressed data). So, factor out the parts of generic_write_checks() that don't need an iov_iter into a new generic_write_checks_count() function and export that. Reviewed-by: Nikolay Borisov Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/read_write.c | 33 ++++++++++++++++++++------------- include/linux/fs.h | 1 + 2 files changed, 21 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/read_write.c b/fs/read_write.c index 4d60146243df..dc5000173b80 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1618,24 +1618,16 @@ int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count) return 0; } -/* - * Performs necessary checks before doing a write - * - * Can adjust writing position or amount of bytes to write. - * Returns appropriate error code that caller should return or - * zero in case that write should be allowed. - */ -ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) +/* Like generic_write_checks(), but takes size of write instead of iter. */ +int generic_write_checks_count(struct kiocb *iocb, loff_t *count) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - loff_t count; - int ret; if (IS_SWAPFILE(inode)) return -ETXTBSY; - if (!iov_iter_count(from)) + if (!*count) return 0; /* FIXME: this is for backwards compatibility with 2.4 */ @@ -1645,8 +1637,23 @@ ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) return -EINVAL; - count = iov_iter_count(from); - ret = generic_write_check_limits(file, iocb->ki_pos, &count); + return generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, count); +} +EXPORT_SYMBOL(generic_write_checks_count); + +/* + * Performs necessary checks before doing a write + * + * Can adjust writing position or amount of bytes to write. + * Returns appropriate error code that caller should return or + * zero in case that write should be allowed. 
+ */ +ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) +{ + loff_t count = iov_iter_count(from); + int ret; + + ret = generic_write_checks_count(iocb, &count); if (ret) return ret; diff --git a/include/linux/fs.h b/include/linux/fs.h index 0ebfc2519212..27746a3da8fd 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3130,6 +3130,7 @@ extern int sb_min_blocksize(struct super_block *, int); extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); +int generic_write_checks_count(struct kiocb *iocb, loff_t *count); extern int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count); extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); -- cgit v1.2.3-71-gd317 From ec090a0392ff634e9304c19a8578d476a9e0c830 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Fri, 25 Feb 2022 16:46:56 +0200 Subject: mtd: core: Remove partid and partname debugfs files partid and partname debugfs files were used just by SPI NOR, but they were replaced by sysfs entries. Since these debugfs files are no longer used in mtd, remove dead code. The directory is kept as it is used by nandsim, mtdswap and docg3. Signed-off-by: Tudor Ambarus Reviewed-by: Pratyush Yadav Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20220225144656.634682-1-tudor.ambarus@microchip.com --- drivers/mtd/mtdcore.c | 35 +---------------------------------- include/linux/mtd/mtd.h | 3 --- 2 files changed, 1 insertion(+), 37 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 1e7f3bbf847e..7dd105daf19d 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -336,28 +336,6 @@ static const struct device_type mtd_devtype = { .release = mtd_release, }; -static int mtd_partid_debug_show(struct seq_file *s, void *p) -{ - struct mtd_info *mtd = s->private; - - seq_printf(s, "%s\n", mtd->dbg.partid); - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug); - -static int mtd_partname_debug_show(struct seq_file *s, void *p) -{ - struct mtd_info *mtd = s->private; - - seq_printf(s, "%s\n", mtd->dbg.partname); - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug); - static bool mtd_expert_analysis_mode; #ifdef CONFIG_DEBUG_FS @@ -377,23 +355,12 @@ static struct dentry *dfs_dir_mtd; static void mtd_debugfs_populate(struct mtd_info *mtd) { - struct mtd_info *master = mtd_get_master(mtd); struct device *dev = &mtd->dev; - struct dentry *root; if (IS_ERR_OR_NULL(dfs_dir_mtd)) return; - root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd); - mtd->dbg.dfs_dir = root; - - if (master->dbg.partid) - debugfs_create_file("partid", 0400, root, master, - &mtd_partid_debug_fops); - - if (master->dbg.partname) - debugfs_create_file("partname", 0400, root, master, - &mtd_partname_debug_fops); + mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd); } #ifndef CONFIG_MMU diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 1b3fc8c71ab3..151607e9d64a 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -188,9 +188,6 @@ struct module; /* only needed for owner field in mtd_info */ */ struct mtd_debug_info { struct dentry *dfs_dir; - - const char *partname; - const char *partid; }; /** -- cgit v1.2.3-71-gd317 From fc93db153b0187a9047f00bfd5cce75108530593 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 
12 Mar 2022 13:45:05 -0800 Subject: net: disable preemption in dev_core_stats_XXX_inc() helpers syzbot was kind enough to remind us that dev->{tx_dropped|rx_dropped} could be increased in process context. BUG: using smp_processor_id() in preemptible [00000000] code: syz-executor413/3593 caller is netdev_core_stats_alloc+0x98/0x110 net/core/dev.c:10298 CPU: 1 PID: 3593 Comm: syz-executor413 Not tainted 5.17.0-rc7-syzkaller-02426-g97aeb877de7f #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106 check_preemption_disabled+0x16b/0x170 lib/smp_processor_id.c:49 netdev_core_stats_alloc+0x98/0x110 net/core/dev.c:10298 dev_core_stats include/linux/netdevice.h:3855 [inline] dev_core_stats_rx_dropped_inc include/linux/netdevice.h:3866 [inline] tun_get_user+0x3455/0x3ab0 drivers/net/tun.c:1800 tun_chr_write_iter+0xe1/0x200 drivers/net/tun.c:2015 call_write_iter include/linux/fs.h:2074 [inline] new_sync_write+0x431/0x660 fs/read_write.c:503 vfs_write+0x7cd/0xae0 fs/read_write.c:590 ksys_write+0x12d/0x250 fs/read_write.c:643 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x7f2cf4f887e3 Code: 5d 41 5c 41 5d 41 5e e9 9b fd ff ff 66 2e 0f 1f 84 00 00 00 00 00 90 64 8b 04 25 18 00 00 00 85 c0 75 14 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 55 c3 0f 1f 40 00 48 83 ec 28 48 89 54 24 18 RSP: 002b:00007ffd50dd5fd8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 00007ffd50dd6000 RCX: 00007f2cf4f887e3 RDX: 000000000000002a RSI: 0000000000000000 RDI: 00000000000000c8 RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 00007ffd50dd5ff0 R14: 00007ffd50dd5fe8 R15: 00007ffd50dd5fe4 Fixes: 625788b58445 ("net: add per-cpu storage and net->core_stats") Signed-off-by: Eric Dumazet Cc: jeffreyji Cc: Brian Vazquez Acked-by: Paolo Abeni Link: https://lore.kernel.org/r/20220312214505.3294762-1-eric.dumazet@gmail.com Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0d994710b335..8cbe96ce0a2c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3858,10 +3858,14 @@ static inline struct net_device_core_stats *dev_core_stats(struct net_device *de #define DEV_CORE_STATS_INC(FIELD) \ static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ { \ - struct net_device_core_stats *p = dev_core_stats(dev); \ + struct net_device_core_stats *p; \ + \ + preempt_disable(); \ + p = dev_core_stats(dev); \ \ if (p) \ local_inc(&p->FIELD); \ + preempt_enable(); \ } DEV_CORE_STATS_INC(rx_dropped) DEV_CORE_STATS_INC(tx_dropped) -- cgit v1.2.3-71-gd317 From c14c6843aeb8cdc8f6b0e49411d230e6f6dfda62 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:23 +0000 Subject: fs: read_mapping_page() should take a struct file argument While read_cache_page() takes a void *, because you can pass a pointer to anything as the first argument of filler_t, if we are calling read_mapping_page(), it will be passed as the first argument of ->readpage, so we know this must be a struct file pointer, and we should let the compiler enforce that for us. 
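As a rough illustration of the retyped helper (not part of this patch; the function name and its body are invented), a filesystem caller under the new prototype would look like:

	/* Read one page of @file through the page cache; the filler ends up
	 * being ->readpage, so the last argument really is a file. */
	static struct page *example_read_page(struct file *file, pgoff_t index)
	{
		return read_mapping_page(file->f_mapping, index, file);
	}

Passing anything other than a struct file pointer (or NULL) no longer compiles, which is exactly the enforcement described above.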
Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- include/linux/pagemap.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 270bf5136c34..55a80d8f0e9c 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -636,15 +636,15 @@ extern int read_cache_pages(struct address_space *mapping, struct list_head *pages, filler_t *filler, void *data); static inline struct page *read_mapping_page(struct address_space *mapping, - pgoff_t index, void *data) + pgoff_t index, struct file *file) { - return read_cache_page(mapping, index, NULL, data); + return read_cache_page(mapping, index, NULL, file); } static inline struct folio *read_mapping_folio(struct address_space *mapping, - pgoff_t index, void *data) + pgoff_t index, struct file *file) { - return read_cache_folio(mapping, index, NULL, data); + return read_cache_folio(mapping, index, NULL, file); } /* -- cgit v1.2.3-71-gd317 From cd1067beeebfe23fc8cab071790fefb273962d51 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:26 +0000 Subject: buffer: Add folio_buffers() While there is no intent to use large folios in filesystems using buffer heads, converting the filesystems to use single-page folios is still worth doing to remove legacy infrastructure and hidden calls to compound_head(). These helper functions are needed for that conversion to take place. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- include/linux/buffer_head.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 36f33685c8c0..3451f1fcda12 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -144,6 +144,7 @@ BUFFER_FNS(Defer_Completion, defer_completion) ((struct buffer_head *)page_private(page)); \ }) #define page_has_buffers(page) PagePrivate(page) +#define folio_buffers(folio) folio_get_private(folio) void buffer_check_dirty_writeback(struct page *page, bool *dirty, bool *writeback); -- cgit v1.2.3-71-gd317 From 2e7e80f7e7e9dbbb3c2a85ee923ca32826052816 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:27 +0000 Subject: fs: Convert is_partially_uptodate to folios Since the uptodate property is maintained on a per-folio basis, the is_partially_uptodate method should also take a folio. Fix the types at the same time so it's clear that it returns true/false and takes the count in bytes, not blocks. 
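As a hedged sketch of the converted method in use (the aops name here is invented; the signature is the one introduced below), a buffer-head filesystem simply wires up the shared helper:

	static const struct address_space_operations example_aops = {
		/* now bool (*)(struct folio *, size_t from, size_t count) */
		.is_partially_uptodate = block_is_partially_uptodate,
	};

Callers such as filemap_range_uptodate() then pass a folio and byte counts directly, as the mm/filemap.c hunk below shows.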
Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- Documentation/filesystems/locking.rst | 2 +- Documentation/filesystems/vfs.rst | 10 +++++----- fs/buffer.c | 26 ++++++++++++------------- fs/iomap/buffered-io.c | 36 ++++++++++++++++------------------- include/linux/buffer_head.h | 3 +-- include/linux/fs.h | 4 ++-- include/linux/iomap.h | 3 +-- mm/filemap.c | 4 ++-- 8 files changed, 40 insertions(+), 48 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 3f9b1497ebb8..88b33524687f 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -258,7 +258,7 @@ prototypes:: int (*migratepage)(struct address_space *, struct page *, struct page *); void (*putback_page) (struct page *); int (*launder_page)(struct page *); - int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); + bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct file *); int (*swap_deactivate)(struct file *); diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index bf5c48066fac..da3e7b470f0a 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -747,8 +747,8 @@ cache in your filesystem. The following members are defined: void (*putback_page) (struct page *); int (*launder_page) (struct page *); - int (*is_partially_uptodate) (struct page *, unsigned long, - unsigned long); + bool (*is_partially_uptodate) (struct folio *, size_t from, + size_t count); void (*is_dirty_writeback) (struct page *, bool *, bool *); int (*error_remove_page) (struct mapping *mapping, struct page *page); int (*swap_activate)(struct file *); @@ -937,9 +937,9 @@ cache in your filesystem. The following members are defined: ``is_partially_uptodate`` Called by the VM when reading a file through the pagecache when - the underlying blocksize != pagesize. If the required block is - up to date then the read can complete without needing the IO to - bring the whole page up to date. + the underlying blocksize is smaller than the size of the folio. + If the required block is up to date then the read can complete + without needing I/O to bring the whole page up to date. ``is_dirty_writeback`` Called by the VM when attempting to reclaim a page. The VM uses diff --git a/fs/buffer.c b/fs/buffer.c index 8e112b6bd371..929061995cf8 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2206,29 +2206,27 @@ int generic_write_end(struct file *file, struct address_space *mapping, EXPORT_SYMBOL(generic_write_end); /* - * block_is_partially_uptodate checks whether buffers within a page are + * block_is_partially_uptodate checks whether buffers within a folio are * uptodate or not. * - * Returns true if all buffers which correspond to a file portion - * we want to read are uptodate. + * Returns true if all buffers which correspond to the specified part + * of the folio are uptodate. 
*/ -int block_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count) +bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { unsigned block_start, block_end, blocksize; unsigned to; struct buffer_head *bh, *head; - int ret = 1; - - if (!page_has_buffers(page)) - return 0; + bool ret = true; - head = page_buffers(page); + head = folio_buffers(folio); + if (!head) + return false; blocksize = head->b_size; - to = min_t(unsigned, PAGE_SIZE - from, count); + to = min_t(unsigned, folio_size(folio) - from, count); to = from + to; - if (from < blocksize && to > PAGE_SIZE - blocksize) - return 0; + if (from < blocksize && to > folio_size(folio) - blocksize) + return false; bh = head; block_start = 0; @@ -2236,7 +2234,7 @@ int block_is_partially_uptodate(struct page *page, unsigned long from, block_end = block_start + blocksize; if (block_end > from && block_start < to) { if (!buffer_uptodate(bh)) { - ret = 0; + ret = false; break; } if (block_end >= to) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index d020a2e81a24..da0a7b15a64e 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -424,37 +424,33 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) EXPORT_SYMBOL_GPL(iomap_readahead); /* - * iomap_is_partially_uptodate checks whether blocks within a page are + * iomap_is_partially_uptodate checks whether blocks within a folio are * uptodate or not. * - * Returns true if all blocks which correspond to a file portion - * we want to read within the page are uptodate. + * Returns true if all blocks which correspond to the specified part + * of the folio are uptodate. */ -int -iomap_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count) +bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { - struct folio *folio = page_folio(page); struct iomap_page *iop = to_iomap_page(folio); - struct inode *inode = page->mapping->host; - unsigned len, first, last; - unsigned i; + struct inode *inode = folio->mapping->host; + size_t len; + unsigned first, last, i; - /* Limit range to one page */ - len = min_t(unsigned, PAGE_SIZE - from, count); + if (!iop) + return false; + + /* Limit range to this folio */ + len = min(folio_size(folio) - from, count); /* First and last blocks in range within page */ first = from >> inode->i_blkbits; last = (from + len - 1) >> inode->i_blkbits; - if (iop) { - for (i = first; i <= last; i++) - if (!test_bit(i, iop->uptodate)) - return 0; - return 1; - } - - return 0; + for (i = first; i <= last; i++) + if (!test_bit(i, iop->uptodate)) + return false; + return true; } EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 3451f1fcda12..79d465057889 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -225,8 +225,7 @@ int __block_write_full_page(struct inode *inode, struct page *page, get_block_t *get_block, struct writeback_control *wbc, bh_end_io_t *handler); int block_read_full_page(struct page*, get_block_t*); -int block_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count); +bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, get_block_t *get_block); int __block_write_begin(struct page *page, loff_t pos, unsigned len, diff --git 
a/include/linux/fs.h b/include/linux/fs.h index e2d892b201b0..5939e6694ada 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -400,8 +400,8 @@ struct address_space_operations { bool (*isolate_page)(struct page *, isolate_mode_t); void (*putback_page)(struct page *); int (*launder_page) (struct page *); - int (*is_partially_uptodate) (struct page *, unsigned long, - unsigned long); + bool (*is_partially_uptodate) (struct folio *, size_t from, + size_t count); void (*is_dirty_writeback) (struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 97a3a2edb585..3bcbb264f83f 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -227,8 +227,7 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); int iomap_readpage(struct page *page, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); -int iomap_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count); +bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); int iomap_releasepage(struct page *page, gfp_t gfp_mask); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); void iomap_invalidatepage(struct page *page, unsigned int offset, diff --git a/mm/filemap.c b/mm/filemap.c index ad8c39d90bf9..9639b844dd31 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2452,7 +2452,7 @@ static bool filemap_range_uptodate(struct address_space *mapping, pos -= folio_pos(folio); } - return mapping->a_ops->is_partially_uptodate(&folio->page, pos, count); + return mapping->a_ops->is_partially_uptodate(folio, pos, count); } static int filemap_update_page(struct kiocb *iocb, @@ -2844,7 +2844,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas, offset = offset_in_folio(folio, start) & ~(bsz - 1); do { - if (ops->is_partially_uptodate(&folio->page, offset, bsz) == + if (ops->is_partially_uptodate(folio, offset, bsz) == seek_data) break; start = (start + bsz) & ~(bsz - 1); -- cgit v1.2.3-71-gd317 From aa1b46dcdc7baaf5fec0be25782ef24b26aa209e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 13 Mar 2022 21:15:02 -1000 Subject: block: fix rq-qos breakage from skipping rq_qos_done_bio() a647a524a467 ("block: don't call rq_qos_ops->done_bio if the bio isn't tracked") made bio_endio() skip rq_qos_done_bio() if BIO_TRACKED is not set. While this fixed a potential oops, it also broke blk-iocost by skipping the done_bio callback for merged bios. Before, whether a bio went through rq_qos_throttle() or rq_qos_merge(), rq_qos_done_bio() would be called on the bio on completion with BIO_TRACKED distinguishing the former from the latter. After that commit, rq_qos_done_bio() is no longer called for bios which went through rq_qos_merge(). This royally confuses blk-iocost, as the merged bios never finish and are considered perpetually in-flight. One reliably reproducible failure mode is an intermediate cgroup getting stuck active, preventing its children from being activated due to the leaf-only rule and leading to loss of control. The following is from the resctl-bench protection scenario, which emulates isolating a web-server-like workload from a memory bomb, run on an iocost configuration which should yield a reasonable level of protection. 
# cat /sys/block/nvme2n1/device/model Samsung SSD 970 PRO 512GB # cat /sys/fs/cgroup/io.cost.model 259:0 ctrl=user model=linear rbps=834913556 rseqiops=93622 rrandiops=102913 wbps=618985353 wseqiops=72325 wrandiops=71025 # cat /sys/fs/cgroup/io.cost.qos 259:0 enable=1 ctrl=user rpct=95.00 rlat=18776 wpct=95.00 wlat=8897 min=60.00 max=100.00 # resctl-bench -m 29.6G -r out.json run protection::scenario=mem-hog,loops=1 ... Memory Hog Summary ================== IO Latency: R p50=242u:336u/2.5m p90=794u:1.4m/7.5m p99=2.7m:8.0m/62.5m max=8.0m:36.4m/350m W p50=221u:323u/1.5m p90=709u:1.2m/5.5m p99=1.5m:2.5m/9.5m max=6.9m:35.9m/350m Isolation and Request Latency Impact Distributions: min p01 p05 p10 p25 p50 p75 p90 p95 p99 max mean stdev isol% 15.90 15.90 15.90 40.05 57.24 59.07 60.01 74.63 74.63 90.35 90.35 58.12 15.82 lat-imp% 0 0 0 0 0 4.55 14.68 15.54 233.5 548.1 548.1 53.88 143.6 Result: isol=58.12:15.82% lat_imp=53.88%:143.6 work_csv=100.0% missing=3.96% The isolation result of 58.12% is close to what this device would show without any IO control. Fix it by introducing a new flag BIO_QOS_MERGED to mark merged bios and calling rq_qos_done_bio() on them too. For consistency and clarity, rename BIO_TRACKED to BIO_QOS_THROTTLED. The flag checks are moved into rq_qos_done_bio() so that it's next to the code paths that set the flags. With the patch applied, the above same benchmark shows: # resctl-bench -m 29.6G -r out.json run protection::scenario=mem-hog,loops=1 ... Memory Hog Summary ================== IO Latency: R p50=123u:84.4u/985u p90=322u:256u/2.5m p99=1.6m:1.4m/9.5m max=11.1m:36.0m/350m W p50=429u:274u/995u p90=1.7m:1.3m/4.5m p99=3.4m:2.7m/11.5m max=7.9m:5.9m/26.5m Isolation and Request Latency Impact Distributions: min p01 p05 p10 p25 p50 p75 p90 p95 p99 max mean stdev isol% 84.91 84.91 89.51 90.73 92.31 94.49 96.36 98.04 98.71 100.0 100.0 94.42 2.81 lat-imp% 0 0 0 0 0 2.81 5.73 11.11 13.92 17.53 22.61 4.10 4.68 Result: isol=94.42:2.81% lat_imp=4.10%:4.68 work_csv=58.34% missing=0% Signed-off-by: Tejun Heo Fixes: a647a524a467 ("block: don't call rq_qos_ops->done_bio if the bio isn't tracked") Cc: stable@vger.kernel.org # v5.15+ Cc: Ming Lei Cc: Yu Kuai Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/Yi7rdrzQEHjJLGKB@slm.duckdns.org Signed-off-by: Jens Axboe --- block/bio.c | 3 +-- block/blk-iolatency.c | 2 +- block/blk-rq-qos.h | 20 +++++++++++--------- include/linux/blk_types.h | 3 ++- 4 files changed, 15 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index 151cace2dbe1..33979f306e9e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1516,8 +1516,7 @@ again: if (!bio_integrity_endio(bio)) return; - if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED)) - rq_qos_done_bio(bdev_get_queue(bio->bi_bdev), bio); + rq_qos_done_bio(bio); if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 010e658d44a8..2f33932e72e3 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -598,7 +598,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) int inflight = 0; blkg = bio->bi_blkg; - if (!blkg || !bio_flagged(bio, BIO_TRACKED)) + if (!blkg || !bio_flagged(bio, BIO_QOS_THROTTLED)) return; iolat = blkg_to_lat(bio->bi_blkg); diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h index 3cfbc8668cba..68267007da1c 100644 --- a/block/blk-rq-qos.h +++ b/block/blk-rq-qos.h @@ -177,20 +177,20 @@ 
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq) __rq_qos_requeue(q->rq_qos, rq); } -static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio) +static inline void rq_qos_done_bio(struct bio *bio) { - if (q->rq_qos) - __rq_qos_done_bio(q->rq_qos, bio); + if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) || + bio_flagged(bio, BIO_QOS_MERGED))) { + struct request_queue *q = bdev_get_queue(bio->bi_bdev); + if (q->rq_qos) + __rq_qos_done_bio(q->rq_qos, bio); + } } static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio) { - /* - * BIO_TRACKED lets controllers know that a bio went through the - * normal rq_qos path. - */ if (q->rq_qos) { - bio_set_flag(bio, BIO_TRACKED); + bio_set_flag(bio, BIO_QOS_THROTTLED); __rq_qos_throttle(q->rq_qos, bio); } } @@ -205,8 +205,10 @@ static inline void rq_qos_track(struct request_queue *q, struct request *rq, static inline void rq_qos_merge(struct request_queue *q, struct request *rq, struct bio *bio) { - if (q->rq_qos) + if (q->rq_qos) { + bio_set_flag(bio, BIO_QOS_MERGED); __rq_qos_merge(q->rq_qos, rq, bio); + } } static inline void rq_qos_queue_depth_changed(struct request_queue *q) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 5561e58d158a..0c3563b45fe9 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -324,7 +324,8 @@ enum { BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion * of this bio. */ BIO_CGROUP_ACCT, /* has been accounted to a cgroup */ - BIO_TRACKED, /* set if bio goes through the rq_qos path */ + BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */ + BIO_QOS_MERGED, /* but went through rq_qos merge path */ BIO_REMAPPED, BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */ -- cgit v1.2.3-71-gd317 From 45ceaf14d53a123e5955477da501bc6f26b99039 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 14 Mar 2022 19:45:37 -0700 Subject: Input: extract ChromeOS vivaldi physmap show function Let's introduce a common library file for the physmap show function duplicated between three different keyboard drivers. This largely copies the code from cros_ec_keyb.c which has the most recent version of the show function, while using the vivaldi_data struct from the hid-vivaldi driver. This saves a small amount of space in an allyesconfig build. 
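As an illustrative sketch only (the driver struct and names are invented; the helper is the one added by this patch), a keyboard driver that embeds the shared struct vivaldi_data exposes its sysfs attribute roughly as:

	struct example_kbd {
		struct vivaldi_data vdata;	/* filled at probe time */
	};

	static ssize_t function_row_physmap_show(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
	{
		struct example_kbd *kbd = dev_get_drvdata(dev);

		/* prints "XX XX ... XX\n", or nothing if the map is empty */
		return vivaldi_function_row_physmap_show(&kbd->vdata, buf);
	}
	static DEVICE_ATTR_RO(function_row_physmap);

The allyesconfig bloat-o-meter comparison below quantifies the saving: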
$ ./scripts/bloat-o-meter vmlinux.before vmlinux.after add/remove: 3/0 grow/shrink: 2/3 up/down: 412/-720 (-308) Function old new delta vivaldi_function_row_physmap_show - 292 +292 _sub_I_65535_1 1057564 1057616 +52 _sub_D_65535_0 1057564 1057616 +52 e843419@49f2_00062737_9b04 - 8 +8 e843419@20f6_0002a34d_35bc - 8 +8 atkbd_parse_fwnode_data 480 472 -8 atkbd_do_show_function_row_physmap 316 76 -240 function_row_physmap_show 620 148 -472 Total: Before=285581925, After=285581617, chg -0.00% Signed-off-by: Stephen Boyd Tested-by: Stephen Boyd # coachz, wormdingler Link: https://lore.kernel.org/r/20220228075446.466016-3-dmitry.torokhov@gmail.com Signed-off-by: Dmitry Torokhov --- drivers/hid/Kconfig | 1 + drivers/hid/hid-vivaldi.c | 27 ++++++---------------- drivers/input/Kconfig | 7 ++++++ drivers/input/Makefile | 1 + drivers/input/keyboard/Kconfig | 2 ++ drivers/input/keyboard/atkbd.c | 27 +++++++--------------- drivers/input/keyboard/cros_ec_keyb.c | 43 +++++++++++++---------------------- drivers/input/vivaldi-fmap.c | 39 +++++++++++++++++++++++++++++++ include/linux/input/vivaldi-fmap.h | 27 ++++++++++++++++++++++ 9 files changed, 108 insertions(+), 66 deletions(-) create mode 100644 drivers/input/vivaldi-fmap.c create mode 100644 include/linux/input/vivaldi-fmap.h (limited to 'include/linux') diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index f5544157576c..5569a2029dab 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -411,6 +411,7 @@ config HID_GOOGLE_HAMMER config HID_VIVALDI tristate "Vivaldi Keyboard" + select INPUT_VIVALDIFMAP depends on HID help Say Y here if you want to enable support for Vivaldi keyboards. diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c index 42ceb2058a09..ca8cb40928e6 100644 --- a/drivers/hid/hid-vivaldi.c +++ b/drivers/hid/hid-vivaldi.c @@ -8,37 +8,24 @@ #include #include +#include #include #include #include -#define MIN_FN_ROW_KEY 1 -#define MAX_FN_ROW_KEY 24 +#define MIN_FN_ROW_KEY 1 +#define MAX_FN_ROW_KEY VIVALDI_MAX_FUNCTION_ROW_KEYS #define HID_VD_FN_ROW_PHYSMAP 0x00000001 #define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP) -struct vivaldi_data { - u32 function_row_physmap[MAX_FN_ROW_KEY - MIN_FN_ROW_KEY + 1]; - int max_function_row_key; -}; - static ssize_t function_row_physmap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct vivaldi_data *drvdata = hid_get_drvdata(hdev); - ssize_t size = 0; - int i; - - if (!drvdata->max_function_row_key) - return 0; - for (i = 0; i < drvdata->max_function_row_key; i++) - size += sprintf(buf + size, "%02X ", - drvdata->function_row_physmap[i]); - size += sprintf(buf + size, "\n"); - return size; + return vivaldi_function_row_physmap_show(drvdata, buf); } static DEVICE_ATTR_RO(function_row_physmap); @@ -85,11 +72,11 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL) return; - fn_key = (usage->hid & HID_USAGE); + fn_key = usage->hid & HID_USAGE; if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY) return; - if (fn_key > drvdata->max_function_row_key) - drvdata->max_function_row_key = fn_key; + if (fn_key > drvdata->num_function_row_keys) + drvdata->num_function_row_keys = fn_key; report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL); if (!report_data) diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig index 5baebf62df33..e2752f7364bc 100644 --- a/drivers/input/Kconfig +++ b/drivers/input/Kconfig @@ -77,6 
+77,13 @@ config INPUT_MATRIXKMAP To compile this driver as a module, choose M here: the module will be called matrix-keymap. +config INPUT_VIVALDIFMAP + tristate + help + ChromeOS Vivaldi keymap support library. This is a hidden + option so that drivers can use common code to parse and + expose the vivaldi function row keymap. + comment "Userland interfaces" config INPUT_MOUSEDEV diff --git a/drivers/input/Makefile b/drivers/input/Makefile index 037cc595106c..2266c7d010ef 100644 --- a/drivers/input/Makefile +++ b/drivers/input/Makefile @@ -12,6 +12,7 @@ input-core-y += touchscreen.o obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o obj-$(CONFIG_INPUT_MATRIXKMAP) += matrix-keymap.o +obj-$(CONFIG_INPUT_VIVALDIFMAP) += vivaldi-fmap.o obj-$(CONFIG_INPUT_LEDS) += input-leds.o obj-$(CONFIG_INPUT_MOUSEDEV) += mousedev.o diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index df1a029d029b..4ea79db8f134 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -103,6 +103,7 @@ config KEYBOARD_ATKBD select SERIO_LIBPS2 select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO select SERIO_GSCPS2 if GSC + select INPUT_VIVALDIFMAP help Say Y here if you want to use a standard AT or PS/2 keyboard. Usually you'll need this, unless you have a different type keyboard (USB, ADB @@ -749,6 +750,7 @@ config KEYBOARD_XTKBD config KEYBOARD_CROS_EC tristate "ChromeOS EC keyboard" select INPUT_MATRIXKMAP + select INPUT_VIVALDIFMAP depends on CROS_EC help Say Y here to enable the matrix keyboard used by ChromeOS devices diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index fbdef95291e9..d4131236d18c 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -64,8 +65,6 @@ static bool atkbd_terminal; module_param_named(terminal, atkbd_terminal, bool, 0); MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2"); -#define MAX_FUNCTION_ROW_KEYS 24 - #define SCANCODE(keymap) ((keymap >> 16) & 0xFFFF) #define KEYCODE(keymap) (keymap & 0xFFFF) @@ -237,8 +236,7 @@ struct atkbd { /* Serializes reconnect(), attr->set() and event work */ struct mutex mutex; - u32 function_row_physmap[MAX_FUNCTION_ROW_KEYS]; - int num_function_row_keys; + struct vivaldi_data vdata; }; /* @@ -308,17 +306,7 @@ static struct attribute *atkbd_attributes[] = { static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf) { - ssize_t size = 0; - int i; - - if (!atkbd->num_function_row_keys) - return 0; - - for (i = 0; i < atkbd->num_function_row_keys; i++) - size += scnprintf(buf + size, PAGE_SIZE - size, "%02X ", - atkbd->function_row_physmap[i]); - size += scnprintf(buf + size, PAGE_SIZE - size, "\n"); - return size; + return vivaldi_function_row_physmap_show(&atkbd->vdata, buf); } static umode_t atkbd_attr_is_visible(struct kobject *kobj, @@ -329,7 +317,7 @@ static umode_t atkbd_attr_is_visible(struct kobject *kobj, struct atkbd *atkbd = serio_get_drvdata(serio); if (attr == &atkbd_attr_function_row_physmap.attr && - !atkbd->num_function_row_keys) + !atkbd->vdata.num_function_row_keys) return 0; return attr->mode; @@ -1206,10 +1194,11 @@ static void atkbd_parse_fwnode_data(struct serio *serio) /* Parse "function-row-physmap" property */ n = device_property_count_u32(dev, "function-row-physmap"); - if (n > 0 && n <= MAX_FUNCTION_ROW_KEYS && + if (n > 0 && n <= 
VIVALDI_MAX_FUNCTION_ROW_KEYS && !device_property_read_u32_array(dev, "function-row-physmap", - atkbd->function_row_physmap, n)) { - atkbd->num_function_row_keys = n; + atkbd->vdata.function_row_physmap, + n)) { + atkbd->vdata.num_function_row_keys = n; dev_dbg(dev, "FW reported %d function-row key locations\n", n); } } diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index fc02c540636e..6534dfca60b4 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -27,8 +28,6 @@ #include -#define MAX_NUM_TOP_ROW_KEYS 15 - /** * struct cros_ec_keyb - Structure representing EC keyboard device * @@ -44,9 +43,7 @@ * @idev: The input device for the matrix keys. * @bs_idev: The input device for non-matrix buttons and switches (or NULL). * @notifier: interrupt event notifier for transport devices - * @function_row_physmap: An array of the encoded rows/columns for the top - * row function keys, in an order from left to right - * @num_function_row_keys: The number of top row keys in a custom keyboard + * @vdata: vivaldi function row data */ struct cros_ec_keyb { unsigned int rows; @@ -64,8 +61,7 @@ struct cros_ec_keyb { struct input_dev *bs_idev; struct notifier_block notifier; - u16 function_row_physmap[MAX_NUM_TOP_ROW_KEYS]; - size_t num_function_row_keys; + struct vivaldi_data vdata; }; /** @@ -537,9 +533,9 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev) int err; struct property *prop; const __be32 *p; - u16 *physmap; + u32 *physmap; u32 key_pos; - int row, col; + unsigned int row, col, scancode, n_physmap; err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols); if (err) @@ -591,20 +587,21 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev) ckdev->idev = idev; cros_ec_keyb_compute_valid_keys(ckdev); - physmap = ckdev->function_row_physmap; + physmap = ckdev->vdata.function_row_physmap; + n_physmap = 0; of_property_for_each_u32(dev->of_node, "function-row-physmap", prop, p, key_pos) { - if (ckdev->num_function_row_keys == MAX_NUM_TOP_ROW_KEYS) { + if (n_physmap == VIVALDI_MAX_FUNCTION_ROW_KEYS) { dev_warn(dev, "Only support up to %d top row keys\n", - MAX_NUM_TOP_ROW_KEYS); + VIVALDI_MAX_FUNCTION_ROW_KEYS); break; } row = KEY_ROW(key_pos); col = KEY_COL(key_pos); - *physmap = MATRIX_SCAN_CODE(row, col, ckdev->row_shift); - physmap++; - ckdev->num_function_row_keys++; + scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift); + physmap[n_physmap++] = scancode; } + ckdev->vdata.num_function_row_keys = n_physmap; err = input_register_device(ckdev->idev); if (err) { @@ -619,18 +616,10 @@ static ssize_t function_row_physmap_show(struct device *dev, struct device_attribute *attr, char *buf) { - ssize_t size = 0; - int i; - struct cros_ec_keyb *ckdev = dev_get_drvdata(dev); - u16 *physmap = ckdev->function_row_physmap; - - for (i = 0; i < ckdev->num_function_row_keys; i++) - size += scnprintf(buf + size, PAGE_SIZE - size, - "%s%02X", size ? 
" " : "", physmap[i]); - if (size) - size += scnprintf(buf + size, PAGE_SIZE - size, "\n"); + const struct cros_ec_keyb *ckdev = dev_get_drvdata(dev); + const struct vivaldi_data *data = &ckdev->vdata; - return size; + return vivaldi_function_row_physmap_show(data, buf); } static DEVICE_ATTR_RO(function_row_physmap); @@ -648,7 +637,7 @@ static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj, struct cros_ec_keyb *ckdev = dev_get_drvdata(dev); if (attr == &dev_attr_function_row_physmap.attr && - !ckdev->num_function_row_keys) + !ckdev->vdata.num_function_row_keys) return 0; return attr->mode; diff --git a/drivers/input/vivaldi-fmap.c b/drivers/input/vivaldi-fmap.c new file mode 100644 index 000000000000..6dae83d96806 --- /dev/null +++ b/drivers/input/vivaldi-fmap.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Helpers for ChromeOS Vivaldi keyboard function row mapping + * + * Copyright (C) 2022 Google, Inc + */ + +#include +#include +#include +#include +#include + +/** + * vivaldi_function_row_physmap_show - Print vivaldi function row physmap attribute + * @data: The vivaldi function row map + * @buf: Buffer to print the function row phsymap to + */ +ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data, + char *buf) +{ + ssize_t size = 0; + int i; + const u32 *physmap = data->function_row_physmap; + + if (!data->num_function_row_keys) + return 0; + + for (i = 0; i < data->num_function_row_keys; i++) + size += scnprintf(buf + size, PAGE_SIZE - size, + "%s%02X", size ? " " : "", physmap[i]); + if (size) + size += scnprintf(buf + size, PAGE_SIZE - size, "\n"); + + return size; +} +EXPORT_SYMBOL_GPL(vivaldi_function_row_physmap_show); + +MODULE_LICENSE("GPL"); diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h new file mode 100644 index 000000000000..7e4b7023bf04 --- /dev/null +++ b/include/linux/input/vivaldi-fmap.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VIVALDI_FMAP_H +#define _VIVALDI_FMAP_H + +#include + +#define VIVALDI_MAX_FUNCTION_ROW_KEYS 24 + +/** + * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards + * @function_row_physmap: An array of scancodes or their equivalent (HID usage + * codes, encoded rows/columns, etc) for the top + * row function keys, in an order from left to right + * @num_function_row_keys: The number of top row keys in a custom keyboard + * + * This structure is supposed to be used by ChromeOS keyboards using + * the Vivaldi keyboard function row design. + */ +struct vivaldi_data { + u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS]; + unsigned int num_function_row_keys; +}; + +ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data, + char *buf); + +#endif /* _VIVALDI_FMAP_H */ -- cgit v1.2.3-71-gd317 From c8c301abeae58ec756b8fcb2178a632bd3c9e284 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 8 Mar 2022 16:30:18 +0100 Subject: x86/ibt: Add ANNOTATE_NOENDBR In order to have objtool warn about code references to !ENDBR instruction, we need an annotation to allow this for non-control-flow instances -- consider text range checks, text patching, or return trampolines etc. 
Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kees Cook Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20220308154317.578968224@infradead.org --- include/linux/objtool.h | 16 ++++++++++++++++ tools/include/linux/objtool.h | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'include/linux') diff --git a/include/linux/objtool.h b/include/linux/objtool.h index aca52db2f3f3..f797368820c8 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -77,6 +77,12 @@ struct unwind_hint { #define STACK_FRAME_NON_STANDARD_FP(func) #endif +#define ANNOTATE_NOENDBR \ + "986: \n\t" \ + ".pushsection .discard.noendbr\n\t" \ + _ASM_PTR " 986b\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -129,6 +135,13 @@ struct unwind_hint { .popsection .endm +.macro ANNOTATE_NOENDBR +.Lhere_\@: + .pushsection .discard.noendbr + .quad .Lhere_\@ + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -139,12 +152,15 @@ struct unwind_hint { "\n\t" #define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD_FP(func) +#define ANNOTATE_NOENDBR #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 .endm .macro STACK_FRAME_NON_STANDARD func:req .endm +.macro ANNOTATE_NOENDBR +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h index aca52db2f3f3..f797368820c8 100644 --- a/tools/include/linux/objtool.h +++ b/tools/include/linux/objtool.h @@ -77,6 +77,12 @@ struct unwind_hint { #define STACK_FRAME_NON_STANDARD_FP(func) #endif +#define ANNOTATE_NOENDBR \ + "986: \n\t" \ + ".pushsection .discard.noendbr\n\t" \ + _ASM_PTR " 986b\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -129,6 +135,13 @@ struct unwind_hint { .popsection .endm +.macro ANNOTATE_NOENDBR +.Lhere_\@: + .pushsection .discard.noendbr + .quad .Lhere_\@ + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -139,12 +152,15 @@ struct unwind_hint { "\n\t" #define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD_FP(func) +#define ANNOTATE_NOENDBR #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 .endm .macro STACK_FRAME_NON_STANDARD func:req .endm +.macro ANNOTATE_NOENDBR +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ -- cgit v1.2.3-71-gd317 From cc66bb91457827f62e2b6cb2518666820f0a6c48 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 8 Mar 2022 16:30:32 +0100 Subject: x86/ibt,kprobes: Cure sym+0 equals fentry woes In order to allow kprobes to skip the ENDBR instructions at sym+0 for X86_KERNEL_IBT builds, change _kprobe_addr() to take an architecture callback to inspect the function at hand and modify the offset if needed. This streamlines the existing interface to cover more cases and require less hooks. Once PowerPC gets fully converted there will only be the one arch hook. 
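To make the new contract concrete, a hedged sketch of the x86 semantics added below (the wrapper function is invented):

	/* Resolve a function-entry probe address for a symbol base @addr. */
	static kprobe_opcode_t *example_entry(unsigned long addr)
	{
		bool on_func_entry;

		/* On an IBT kernel the function starts with a 4-byte ENDBR,
		 * so the arch hook maps offsets 0 and 4 to sym+4 and still
		 * reports a function-entry probe. */
		return arch_adjust_kprobe_addr(addr, 0, &on_func_entry);
	}

Without ENDBR at the address, the hook returns addr + offset and sets on_func_entry only for offset 0, matching the old behaviour.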
Signed-off-by: Peter Zijlstra (Intel) Acked-by: Masami Hiramatsu Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20220308154318.405947704@infradead.org --- arch/powerpc/kernel/kprobes.c | 34 +++++++++++++--------- arch/x86/kernel/kprobes/core.c | 17 +++++++++++ include/linux/kprobes.h | 3 +- kernel/kprobes.c | 66 +++++++++++++++++++++++++++++++++--------- 4 files changed, 92 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 9a492fdec1df..7dae0b01abfb 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -105,6 +105,27 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) return addr; } +static bool arch_kprobe_on_func_entry(unsigned long offset) +{ +#ifdef PPC64_ELF_ABI_v2 +#ifdef CONFIG_KPROBES_ON_FTRACE + return offset <= 16; +#else + return offset <= 8; +#endif +#else + return !offset; +#endif +} + +/* XXX try and fold the magic of kprobe_lookup_name() in this */ +kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, + bool *on_func_entry) +{ + *on_func_entry = arch_kprobe_on_func_entry(offset); + return (kprobe_opcode_t *)(addr + offset); +} + void *alloc_insn_page(void) { void *page; @@ -218,19 +239,6 @@ static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs kcb->kprobe_saved_msr = regs->msr; } -bool arch_kprobe_on_func_entry(unsigned long offset) -{ -#ifdef PPC64_ELF_ABI_v2 -#ifdef CONFIG_KPROBES_ON_FTRACE - return offset <= 16; -#else - return offset <= 8; -#endif -#else - return !offset; -#endif -} - void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->link; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 4d8086a1627e..9ea0e3e79896 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -52,6 +52,7 @@ #include #include #include +#include #include "common.h" @@ -294,6 +295,22 @@ static int can_probe(unsigned long paddr) return (addr == paddr); } +/* If x86 supports IBT (ENDBR) it must be skipped. 
*/ +kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, + bool *on_func_entry) +{ + if (is_endbr(*(u32 *)addr)) { + *on_func_entry = !offset || offset == 4; + if (*on_func_entry) + offset = 4; + + } else { + *on_func_entry = !offset; + } + + return (kprobe_opcode_t *)(addr + offset); +} + /* * Copy an instruction with recovering modified instruction by kprobes * and adjust the displacement if the instruction uses the %rip-relative diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 19b884353b15..9c28f7a0ef42 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -265,7 +265,6 @@ extern int arch_init_kprobes(void); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); extern int arch_populate_kprobe_blacklist(void); -extern bool arch_kprobe_on_func_entry(unsigned long offset); extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); extern bool within_kprobe_blacklist(unsigned long addr); @@ -384,6 +383,8 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) } kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); +kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry); + int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); int register_kprobes(struct kprobe **kps, int num); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 6d1e11cda4f1..185badc780b7 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1488,25 +1488,69 @@ bool within_kprobe_blacklist(unsigned long addr) return false; } +/* + * arch_adjust_kprobe_addr - adjust the address + * @addr: symbol base address + * @offset: offset within the symbol + * @on_func_entry: was this @addr+@offset on the function entry + * + * Typically returns @addr + @offset, except for special cases where the + * function might be prefixed by a CFI landing pad, in that case any offset + * inside the landing pad is mapped to the first 'real' instruction of the + * symbol. + * + * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C + * instruction at +0. + */ +kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr, + unsigned long offset, + bool *on_func_entry) +{ + *on_func_entry = !offset; + return (kprobe_opcode_t *)(addr + offset); +} + /* * If 'symbol_name' is specified, look it up and add the 'offset' * to it. This way, we can specify a relative address to a symbol. * This returns encoded errors if it fails to look up symbol or invalid * combination of parameters. */ -static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr, - const char *symbol_name, unsigned int offset) +static kprobe_opcode_t * +_kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name, + unsigned long offset, bool *on_func_entry) { if ((symbol_name && addr) || (!symbol_name && !addr)) goto invalid; if (symbol_name) { + /* + * Input: @sym + @offset + * Output: @addr + @offset + * + * NOTE: kprobe_lookup_name() does *NOT* fold the offset + * argument into it's output! + */ addr = kprobe_lookup_name(symbol_name, offset); if (!addr) return ERR_PTR(-ENOENT); } - addr = (kprobe_opcode_t *)(((char *)addr) + offset); + /* + * So here we have @addr + @offset, displace it into a new + * @addr' + @offset' where @addr' is the symbol start address. 
+ */ + addr = (void *)addr + offset; + if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset)) + return ERR_PTR(-ENOENT); + addr = (void *)addr - offset; + + /* + * Then ask the architecture to re-combine them, taking care of + * magical function entry details while telling us if this was indeed + * at the start of the function. + */ + addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry); if (addr) return addr; @@ -1516,7 +1560,8 @@ invalid: static kprobe_opcode_t *kprobe_addr(struct kprobe *p) { - return _kprobe_addr(p->addr, p->symbol_name, p->offset); + bool on_func_entry; + return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); } /* @@ -2043,11 +2088,6 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) } NOKPROBE_SYMBOL(pre_handler_kretprobe); -bool __weak arch_kprobe_on_func_entry(unsigned long offset) -{ - return !offset; -} - /** * kprobe_on_func_entry() -- check whether given address is function entry * @addr: Target address @@ -2063,15 +2103,13 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset) */ int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) { - kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); + bool on_func_entry; + kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry); if (IS_ERR(kp_addr)) return PTR_ERR(kp_addr); - if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset)) - return -ENOENT; - - if (!arch_kprobe_on_func_entry(offset)) + if (!on_func_entry) return -EINVAL; return 0; -- cgit v1.2.3-71-gd317 From cb9010f87dcbcdbb51cc96b922c6260848cecbd1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 8 Mar 2022 16:30:44 +0100 Subject: x86/ibt: Ensure module init/exit points have references Since the references to the module init/exit points only have external references, a module LTO run will consider them 'unused' and seal them, leading to an immediate fail on module load. Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20220308154319.113767246@infradead.org --- include/linux/cfi.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/cfi.h b/include/linux/cfi.h index 879744aaa6e0..c6dfc1ed0626 100644 --- a/include/linux/cfi.h +++ b/include/linux/cfi.h @@ -34,8 +34,17 @@ static inline void cfi_module_remove(struct module *mod, unsigned long base_addr #else /* !CONFIG_CFI_CLANG */ -#define __CFI_ADDRESSABLE(fn, __attr) +#ifdef CONFIG_X86_KERNEL_IBT + +#define __CFI_ADDRESSABLE(fn, __attr) \ + const void *__cfi_jt_ ## fn __visible __attr = (void *)&fn + +#endif /* CONFIG_X86_KERNEL_IBT */ #endif /* CONFIG_CFI_CLANG */ +#ifndef __CFI_ADDRESSABLE +#define __CFI_ADDRESSABLE(fn, __attr) +#endif + #endif /* _LINUX_CFI_H */ -- cgit v1.2.3-71-gd317 From eae654f1c21216daa9fbb92591c0d9f5ae46cfc5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 8 Mar 2022 16:30:48 +0100 Subject: exit: Mark do_group_exit() __noreturn vmlinux.o: warning: objtool: get_signal()+0x108: unreachable instruction 0000 000000000007f930 : ... 
0103 7fa33: e8 00 00 00 00 call 7fa38 7fa34: R_X86_64_PLT32 do_group_exit-0x4 0108 7fa38: 41 8b 45 74 mov 0x74(%r13),%eax Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20220308154319.351270711@infradead.org --- include/linux/sched/task.h | 2 +- kernel/exit.c | 2 +- tools/objtool/check.c | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index e84e54d1b490..719c9a6cac8d 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -79,7 +79,7 @@ static inline void exit_thread(struct task_struct *tsk) { } #endif -extern void do_group_exit(int); +extern __noreturn void do_group_exit(int); extern void exit_files(struct task_struct *); extern void exit_itimers(struct signal_struct *); diff --git a/kernel/exit.c b/kernel/exit.c index b00a25bb4ab9..b71f9df9074e 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -906,7 +906,7 @@ SYSCALL_DEFINE1(exit, int, error_code) * Take down every thread in the group. This is called by fatal signals * as well as by sys_exit_group (below). */ -void +void __noreturn do_group_exit(int exit_code) { struct signal_struct *sig = current->signal; diff --git a/tools/objtool/check.c b/tools/objtool/check.c index c3ddcecdab57..9896562350a8 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -181,6 +181,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "kunit_try_catch_throw", "xen_start_kernel", "cpu_bringup_and_idle", + "do_group_exit", "stop_this_cpu", }; -- cgit v1.2.3-71-gd317 From 105cd68596392cfe15056a891b0723609dcad247 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Mar 2022 17:58:35 +0100 Subject: x86: Mark __invalid_creds() __noreturn vmlinux.o: warning: objtool: ksys_unshare()+0x36c: unreachable instruction 0000 0000000000067040 : ... 
0364 673a4: 4c 89 ef mov %r13,%rdi 0367 673a7: e8 00 00 00 00 call 673ac 673a8: R_X86_64_PLT32 __invalid_creds-0x4 036c 673ac: e9 28 ff ff ff jmp 672d9 0371 673b1: 41 bc f4 ff ff ff mov $0xfffffff4,%r12d 0377 673b7: e9 80 fd ff ff jmp 6713c Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/Yi9gOW9f1GGwwUD6@hirez.programming.kicks-ass.net --- include/linux/cred.h | 2 +- kernel/cred.c | 2 +- tools/objtool/check.c | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cred.h b/include/linux/cred.h index fcbc6885cc09..9ed9232af934 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -176,7 +176,7 @@ extern int set_cred_ucounts(struct cred *); * check for validity of credentials */ #ifdef CONFIG_DEBUG_CREDENTIALS -extern void __invalid_creds(const struct cred *, const char *, unsigned); +extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned); extern void __validate_process_creds(struct task_struct *, const char *, unsigned); diff --git a/kernel/cred.c b/kernel/cred.c index 933155c96922..e10c15f51c1f 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -870,7 +870,7 @@ static void dump_invalid_creds(const struct cred *cred, const char *label, /* * report use of invalid credentials */ -void __invalid_creds(const struct cred *cred, const char *file, unsigned line) +void __noreturn __invalid_creds(const struct cred *cred, const char *file, unsigned line) { printk(KERN_ERR "CRED: Invalid credentials\n"); printk(KERN_ERR "CRED: At %s:%u\n", file, line); diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 9896562350a8..0c857e74c852 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -183,6 +183,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "cpu_bringup_and_idle", "do_group_exit", "stop_this_cpu", + "__invalid_creds", }; if (!func) -- cgit v1.2.3-71-gd317 From dca5da2abe406168b85f97e22109710ebe0bda08 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Mar 2022 18:05:52 +0100 Subject: x86,objtool: Move the ASM_REACHABLE annotation to objtool.h Because we need a variant for .S files too. Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/Yi9gOW9f1GGwwUD6@hirez.programming.kicks-ass.net --- arch/x86/include/asm/bug.h | 1 + arch/x86/include/asm/irq_stack.h | 1 + include/linux/compiler.h | 7 ------- include/linux/objtool.h | 16 ++++++++++++++++ tools/include/linux/objtool.h | 16 ++++++++++++++++ 5 files changed, 34 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index bab883c0b6fe..4d20a293c6fd 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -4,6 +4,7 @@ #include <linux/stringify.h> #include <linux/instrumentation.h> +#include <linux/objtool.h> /* * Despite that some emulators terminate on UD2, we use it for WARN().
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h index 05af249d6bec..63f818aedf77 100644 --- a/arch/x86/include/asm/irq_stack.h +++ b/arch/x86/include/asm/irq_stack.h @@ -3,6 +3,7 @@ #define _ASM_X86_IRQ_STACK_H #include <linux/ptrace.h> +#include <linux/objtool.h> #include <asm/processor.h> diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 0f7fd205ab7e..219aa5ddbc73 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -125,18 +125,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, }) #define annotate_unreachable() __annotate_unreachable(__COUNTER__) -#define ASM_REACHABLE \ - "998:\n\t" \ - ".pushsection .discard.reachable\n\t" \ - ".long 998b - .\n\t" \ - ".popsection\n\t" - /* Annotate a C jump table to allow objtool to follow the code flow */ #define __annotate_jump_table __section(".rodata..c_jump_table") #else #define annotate_unreachable() -# define ASM_REACHABLE #define __annotate_jump_table #endif diff --git a/include/linux/objtool.h b/include/linux/objtool.h index f797368820c8..586d35720f13 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -83,6 +83,12 @@ struct unwind_hint { _ASM_PTR " 986b\n\t" \ ".popsection\n\t" +#define ASM_REACHABLE \ + "998:\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long 998b - .\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -142,6 +148,13 @@ struct unwind_hint { .popsection .endm +.macro REACHABLE +.Lhere_\@: + .pushsection .discard.reachable + .long .Lhere_\@ - . + .popsection +.endm + #endif /* __ASSEMBLY__ */ @@ -153,6 +166,7 @@ struct unwind_hint { #define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD_FP(func) #define ANNOTATE_NOENDBR +#define ASM_REACHABLE #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 @@ -161,6 +175,8 @@ struct unwind_hint { .endm .macro ANNOTATE_NOENDBR .endm +.macro REACHABLE +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h index f797368820c8..586d35720f13 100644 --- a/tools/include/linux/objtool.h +++ b/tools/include/linux/objtool.h @@ -83,6 +83,12 @@ struct unwind_hint { _ASM_PTR " 986b\n\t" \ ".popsection\n\t" +#define ASM_REACHABLE \ + "998:\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long 998b - .\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -142,6 +148,13 @@ struct unwind_hint { .popsection .endm +.macro REACHABLE +.Lhere_\@: + .pushsection .discard.reachable + .long .Lhere_\@ - . + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -153,6 +166,7 @@ struct unwind_hint { #define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD_FP(func) #define ANNOTATE_NOENDBR +#define ASM_REACHABLE #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 @@ -161,6 +175,8 @@ struct unwind_hint { .endm .macro ANNOTATE_NOENDBR .endm +.macro REACHABLE +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ -- cgit v1.2.3-71-gd317 From 5ad6b2bdaaea712486145fa5a78ec24d25289071 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:28 +0000 Subject: fs: Turn do_invalidatepage() into folio_invalidate() Take a folio instead of a page, fix the types of the offset & length, and export it to filesystems.
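As a minimal sketch of what the export enables (our illustration, not part of the patch; the helper name foofs_truncate_tail() and its locking assumption are ours), a filesystem can now call the helper directly from its truncate path:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Drop fs-private state for the bytes beyond the new EOF. Assumes the
 * folio is locked, as the truncate path guarantees. */
static void foofs_truncate_tail(struct folio *folio, loff_t newsize)
{
	size_t offset = offset_in_folio(folio, newsize);

	folio_invalidate(folio, offset, folio_size(folio) - offset);
}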
Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- include/linux/mm.h | 3 --- include/linux/pagemap.h | 1 + mm/readahead.c | 2 +- mm/truncate.c | 20 ++++++++++---------- 4 files changed, 12 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 213cc569b192..7808a7959066 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1939,9 +1939,6 @@ int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, struct page **pages); struct page *get_dump_page(unsigned long addr); -extern void do_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); - bool folio_mark_dirty(struct folio *folio); bool set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 55a80d8f0e9c..4503d5baa252 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -893,6 +893,7 @@ static inline void cancel_dirty_page(struct page *page) } bool folio_clear_dirty_for_io(struct folio *folio); bool clear_page_dirty_for_io(struct page *page); +void folio_invalidate(struct folio *folio, size_t offset, size_t length); int __must_check folio_write_one(struct folio *folio); static inline int __must_check write_one_page(struct page *page) { diff --git a/mm/readahead.c b/mm/readahead.c index cf0dcf89eb69..c3c4c30fc121 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -51,7 +51,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping, if (!trylock_page(page)) BUG(); page->mapping = mapping; - do_invalidatepage(page, 0, PAGE_SIZE); + folio_invalidate(page_folio(page), 0, PAGE_SIZE); page->mapping = NULL; unlock_page(page); } diff --git a/mm/truncate.c b/mm/truncate.c index 9dbf0b75da5d..aa0ed373789d 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -138,33 +138,33 @@ static int invalidate_exceptional_entry2(struct address_space *mapping, } /** - * do_invalidatepage - invalidate part or all of a page - * @page: the page which is affected + * folio_invalidate - Invalidate part or all of a folio. + * @folio: The folio which is affected. * @offset: start of the range to invalidate * @length: length of the range to invalidate * - * do_invalidatepage() is called when all or part of the page has become + * folio_invalidate() is called when all or part of the folio has become * invalidated by a truncate operation. * - * do_invalidatepage() does not have to release all buffers, but it must + * folio_invalidate() does not have to release all buffers, but it must * ensure that no dirty buffer is left outside @offset and that no I/O * is underway against any of the blocks which are outside the truncation * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. 
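 * (Callers such as truncate_cleanup_folio() below invoke this with the folio locked, and @offset + @length never exceeds folio_size().)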
*/ -void do_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +void folio_invalidate(struct folio *folio, size_t offset, size_t length) { void (*invalidatepage)(struct page *, unsigned int, unsigned int); - invalidatepage = page->mapping->a_ops->invalidatepage; + invalidatepage = folio->mapping->a_ops->invalidatepage; #ifdef CONFIG_BLOCK if (!invalidatepage) invalidatepage = block_invalidatepage; #endif if (invalidatepage) - (*invalidatepage)(page, offset, length); + (*invalidatepage)(&folio->page, offset, length); } +EXPORT_SYMBOL_GPL(folio_invalidate); /* * If truncate cannot remove the fs-private metadata from the page, the page @@ -182,7 +182,7 @@ static void truncate_cleanup_folio(struct folio *folio) unmap_mapping_folio(folio); if (folio_has_private(folio)) - do_invalidatepage(&folio->page, 0, folio_size(folio)); + folio_invalidate(folio, 0, folio_size(folio)); /* * Some filesystems seem to re-dirty the page even after @@ -264,7 +264,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) folio_zero_range(folio, offset, length); if (folio_has_private(folio)) - do_invalidatepage(&folio->page, offset, length); + folio_invalidate(folio, offset, length); if (!folio_test_large(folio)) return true; if (split_huge_page(&folio->page) == 0) -- cgit v1.2.3-71-gd317 From 128d1f8241d62ab014eef6dd4ef9bb977dbeadb2 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:32 +0000 Subject: fs: Add invalidate_folio() aops method This is used in preference to invalidatepage, if defined. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- Documentation/filesystems/locking.rst | 13 +++++++------ Documentation/filesystems/vfs.rst | 11 ++++++----- include/linux/fs.h | 1 + mm/truncate.c | 8 +++++++- 4 files changed, 21 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 88b33524687f..29a045fd3860 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -250,6 +250,7 @@ prototypes:: loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidate_folio) (struct folio *, size_t start, size_t len); void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, int); void (*freepage)(struct page *); @@ -278,6 +279,7 @@ readpages: no shared write_begin: locks the page exclusive write_end: yes, unlocks exclusive bmap: +invalidate_folio: yes exclusive invalidatepage: yes exclusive releasepage: yes freepage: yes @@ -370,13 +372,12 @@ not locked. filesystems and by the swapper. The latter will eventually go away. Please, keep it that way and don't breed new callers. -->invalidatepage() is called when the filesystem must attempt to drop +->invalidate_folio() is called when the filesystem must attempt to drop some or all of the buffers from the page when it is being truncated. It -returns zero on success. If ->invalidatepage is zero, the kernel uses -block_invalidatepage() instead. The filesystem must exclusively acquire -invalidate_lock before invalidating page cache in truncate / hole punch path -(and thus calling into ->invalidatepage) to block races between page cache -invalidation and page cache filling functions (fault, read, ...). 
+returns zero on success. The filesystem must exclusively acquire +invalidate_lock before invalidating page cache in truncate / hole punch +path (and thus calling into ->invalidate_folio) to block races between page +cache invalidation and page cache filling functions (fault, read, ...). ->releasepage() is called when the kernel is about to try to drop the buffers from the page in preparation for freeing it. It returns zero to diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index da3e7b470f0a..26c090cd8cf5 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -735,6 +735,7 @@ cache in your filesystem. The following members are defined: loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidate_folio) (struct folio *, size_t start, size_t len); void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, int); void (*freepage)(struct page *); @@ -868,15 +869,15 @@ cache in your filesystem. The following members are defined: to find out where the blocks in the file are and uses those addresses directly. -``invalidatepage`` - If a page has PagePrivate set, then invalidatepage will be - called when part or all of the page is to be removed from the +``invalidate_folio`` + If a folio has private data, then invalidate_folio will be + called when part or all of the folio is to be removed from the address space. This generally corresponds to either a truncation, punch hole or a complete invalidation of the address space (in the latter case 'offset' will always be 0 and 'length' - will be PAGE_SIZE). Any private data associated with the page + will be folio_size()). Any private data associated with the page should be updated to reflect this truncation. If offset is 0 - and length is PAGE_SIZE, then the private data should be + and length is folio_size(), then the private data should be released, because the page must be able to be completely discarded. This may be done by calling the ->releasepage function, but in this case the release MUST succeed. diff --git a/include/linux/fs.h b/include/linux/fs.h index 5939e6694ada..bcdb613cd652 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -387,6 +387,7 @@ struct address_space_operations { /* Unfortunately this kludge is needed for FIBMAP. 
Don't use it */ sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidate_folio) (struct folio *, size_t offset, size_t len); void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); diff --git a/mm/truncate.c b/mm/truncate.c index aa0ed373789d..b9ad298e6ce7 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -154,9 +154,15 @@ static int invalidate_exceptional_entry2(struct address_space *mapping, */ void folio_invalidate(struct folio *folio, size_t offset, size_t length) { + const struct address_space_operations *aops = folio->mapping->a_ops; void (*invalidatepage)(struct page *, unsigned int, unsigned int); - invalidatepage = folio->mapping->a_ops->invalidatepage; + if (aops->invalidate_folio) { + aops->invalidate_folio(folio, offset, length); + return; + } + + invalidatepage = aops->invalidatepage; #ifdef CONFIG_BLOCK if (!invalidatepage) invalidatepage = block_invalidatepage; -- cgit v1.2.3-71-gd317 From d82354f6b05fc3b35029b3f75ddbf41b82af3bc8 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:33 +0000 Subject: iomap: Remove iomap_invalidatepage() Use iomap_invalidate_folio() in all the iomap-based filesystems and rename the iomap_invalidatepage tracepoint. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- fs/gfs2/aops.c | 2 +- fs/iomap/buffered-io.c | 9 +-------- fs/iomap/trace.h | 2 +- fs/xfs/xfs_aops.c | 2 +- fs/zonefs/super.c | 2 +- include/linux/iomap.h | 2 -- 6 files changed, 5 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 005e920f5d4a..3d54e6101ed1 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -781,7 +781,7 @@ static const struct address_space_operations gfs2_aops = { .readahead = gfs2_readahead, .set_page_dirty = __set_page_dirty_nobuffers, .releasepage = iomap_releasepage, - .invalidatepage = iomap_invalidatepage, + .invalidate_folio = iomap_invalidate_folio, .bmap = gfs2_bmap, .direct_IO = noop_direct_IO, .migratepage = iomap_migrate_page, diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index da0a7b15a64e..f1df2c9ee584 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -476,7 +476,7 @@ EXPORT_SYMBOL_GPL(iomap_releasepage); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) { - trace_iomap_invalidatepage(folio->mapping->host, + trace_iomap_invalidate_folio(folio->mapping->host, folio_pos(folio) + offset, len); /* @@ -496,13 +496,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) } EXPORT_SYMBOL_GPL(iomap_invalidate_folio); -void iomap_invalidatepage(struct page *page, unsigned int offset, - unsigned int len) -{ - iomap_invalidate_folio(page_folio(page), offset, len); -} -EXPORT_SYMBOL_GPL(iomap_invalidatepage); - #ifdef CONFIG_MIGRATION int iomap_migrate_page(struct address_space *mapping, struct page *newpage, diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h index 65e39785c284..a6689a563c6e 100644 --- a/fs/iomap/trace.h +++ b/fs/iomap/trace.h @@ -81,7 +81,7 @@ DEFINE_EVENT(iomap_range_class, name, \ TP_ARGS(inode, off, len)) DEFINE_RANGE_EVENT(iomap_writepage); DEFINE_RANGE_EVENT(iomap_releasepage); -DEFINE_RANGE_EVENT(iomap_invalidatepage); +DEFINE_RANGE_EVENT(iomap_invalidate_folio); DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail); #define IOMAP_TYPE_STRINGS \ diff --git 
a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 9d6a67c7d227..51a040b658cb 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -569,7 +569,7 @@ const struct address_space_operations xfs_address_space_operations = { .writepages = xfs_vm_writepages, .set_page_dirty = __set_page_dirty_nobuffers, .releasepage = iomap_releasepage, - .invalidatepage = iomap_invalidatepage, + .invalidate_folio = iomap_invalidate_folio, .bmap = xfs_vm_bmap, .direct_IO = noop_direct_IO, .migratepage = iomap_migrate_page, diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index b76dfb310ab6..887b39553eb4 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -187,7 +187,7 @@ static const struct address_space_operations zonefs_file_aops = { .writepages = zonefs_writepages, .set_page_dirty = __set_page_dirty_nobuffers, .releasepage = iomap_releasepage, - .invalidatepage = iomap_invalidatepage, + .invalidate_folio = iomap_invalidate_folio, .migratepage = iomap_migrate_page, .is_partially_uptodate = iomap_is_partially_uptodate, .error_remove_page = generic_error_remove_page, diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 3bcbb264f83f..b76f0dd149fb 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -230,8 +230,6 @@ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); int iomap_releasepage(struct page *page, gfp_t gfp_mask); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); -void iomap_invalidatepage(struct page *page, unsigned int offset, - unsigned int len); #ifdef CONFIG_MIGRATION int iomap_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode); -- cgit v1.2.3-71-gd317 From 7ba13abbd31ee9265e88d7dc029c0f786e665192 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:34 +0000 Subject: fs: Turn block_invalidatepage into block_invalidate_folio Remove special-casing of a NULL invalidatepage, since there is no more block_invalidatepage. 
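The per-filesystem wiring is uniform across the diff below; a sketch for a hypothetical buffer_head-backed filesystem ("foofs" and its readpage/writepage are our stand-ins, not taken from the patch):

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/writeback.h>

static int foofs_readpage(struct file *file, struct page *page);
static int foofs_writepage(struct page *page, struct writeback_control *wbc);

/* The folio-based hook must be set explicitly: a NULL ->invalidatepage
 * no longer falls back to block_invalidatepage(). */
static const struct address_space_operations foofs_aops = {
	.set_page_dirty		= __set_page_dirty_buffers,
	.invalidate_folio	= block_invalidate_folio,
	.readpage		= foofs_readpage,
	.writepage		= foofs_writepage,
};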
Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- block/fops.c | 1 + fs/adfs/inode.c | 1 + fs/affs/file.c | 2 ++ fs/bfs/file.c | 1 + fs/buffer.c | 37 ++++++++++++++++++------------------- fs/ecryptfs/mmap.c | 1 + fs/exfat/inode.c | 1 + fs/ext2/inode.c | 2 ++ fs/ext4/inode.c | 32 ++++++++++++++++---------------- fs/fat/inode.c | 1 + fs/gfs2/meta_io.c | 2 ++ fs/hfs/inode.c | 2 ++ fs/hfsplus/inode.c | 2 ++ fs/hpfs/file.c | 1 + fs/jfs/inode.c | 1 + fs/minix/inode.c | 1 + fs/nilfs2/inode.c | 2 +- fs/nilfs2/mdt.c | 1 + fs/ntfs/aops.c | 5 +++-- fs/ocfs2/aops.c | 2 +- fs/omfs/file.c | 1 + fs/sysv/itree.c | 1 + fs/udf/file.c | 1 + fs/udf/inode.c | 1 + fs/ufs/inode.c | 1 + include/linux/buffer_head.h | 3 +-- mm/truncate.c | 4 ---- 27 files changed, 65 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/block/fops.c b/block/fops.c index 4f59e0f5bf30..8ce1dccd15b9 100644 --- a/block/fops.c +++ b/block/fops.c @@ -430,6 +430,7 @@ static int blkdev_writepages(struct address_space *mapping, const struct address_space_operations def_blk_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = blkdev_readpage, .readahead = blkdev_readahead, .writepage = blkdev_writepage, diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 5156821bfe6a..5c423254895a 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -74,6 +74,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) static const struct address_space_operations adfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = adfs_readpage, .writepage = adfs_writepage, .write_begin = adfs_write_begin, diff --git a/fs/affs/file.c b/fs/affs/file.c index 75ebd2b576ca..6d4921f97162 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -454,6 +454,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations affs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, @@ -835,6 +836,7 @@ err_bh: const struct address_space_operations affs_aops_ofs = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = affs_readpage_ofs, //.writepage = affs_writepage_ofs, .write_begin = affs_write_begin_ofs, diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 7f8544abf636..2e42b82edb58 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -189,6 +189,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations bfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = bfs_readpage, .writepage = bfs_writepage, .write_begin = bfs_write_begin, diff --git a/fs/buffer.c b/fs/buffer.c index 929061995cf8..5fe02e5a9807 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1482,41 +1482,40 @@ static void discard_buffer(struct buffer_head * bh) } /** - * block_invalidatepage - invalidate part or all of a buffer-backed page - * - * @page: the page which is affected + * block_invalidate_folio - Invalidate part or all of a buffer-backed folio. + * @folio: The folio which is affected. 
* @offset: start of the range to invalidate * @length: length of the range to invalidate * - * block_invalidatepage() is called when all or part of the page has become + * block_invalidate_folio() is called when all or part of the folio has been * invalidated by a truncate operation. * - * block_invalidatepage() does not have to release all buffers, but it must + * block_invalidate_folio() does not have to release all buffers, but it must * ensure that no dirty buffer is left outside @offset and that no I/O * is underway against any of the blocks which are outside the truncation * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ -void block_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +void block_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct buffer_head *head, *bh, *next; - unsigned int curr_off = 0; - unsigned int stop = length + offset; + size_t curr_off = 0; + size_t stop = length + offset; - BUG_ON(!PageLocked(page)); - if (!page_has_buffers(page)) - goto out; + BUG_ON(!folio_test_locked(folio)); /* * Check for overflow */ - BUG_ON(stop > PAGE_SIZE || stop < length); + BUG_ON(stop > folio_size(folio) || stop < length); + + head = folio_buffers(folio); + if (!head) + return; - head = page_buffers(page); bh = head; do { - unsigned int next_off = curr_off + bh->b_size; + size_t next_off = curr_off + bh->b_size; next = bh->b_this_page; /* @@ -1535,16 +1534,16 @@ void block_invalidatepage(struct page *page, unsigned int offset, } while (bh != head); /* - * We release buffers only if the entire page is being invalidated. + * We release buffers only if the entire folio is being invalidated. * The get_block cached value has been unconditionally invalidated, * so real IO is not possible anymore. 
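 * (With @length == folio_size() every buffer on the folio has been discarded, so filemap_release_folio() below can free the buffer heads themselves.)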
*/ - if (length == PAGE_SIZE) - try_to_release_page(page, 0); + if (length == folio_size(folio)) + filemap_release_folio(folio, 0); out: return; } -EXPORT_SYMBOL(block_invalidatepage); +EXPORT_SYMBOL(block_invalidate_folio); /* diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 7d85e64ea62f..bf7f35b375b7 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -546,6 +546,7 @@ const struct address_space_operations ecryptfs_aops = { */ #ifdef CONFIG_BLOCK .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, #endif .writepage = ecryptfs_writepage, .readpage = ecryptfs_readpage, diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index df805bd05508..5ed471eb973b 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -491,6 +491,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from) static const struct address_space_operations exfat_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = exfat_readpage, .readahead = exfat_readahead, .writepage = exfat_writepage, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 602578b72d8c..1e14777c3ca6 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -968,6 +968,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc const struct address_space_operations ext2_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = ext2_readpage, .readahead = ext2_readahead, .writepage = ext2_writepage, @@ -983,6 +984,7 @@ const struct address_space_operations ext2_aops = { const struct address_space_operations ext2_nobh_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = ext2_readpage, .readahead = ext2_readahead, .writepage = ext2_nobh_writepage, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 57800ecbe466..07ef3f84db9e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -137,8 +137,6 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode, new_size); } -static void ext4_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); static int __ext4_journalled_writepage(struct page *page, unsigned int len); static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents); @@ -1571,16 +1569,18 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; + struct folio *folio = page_folio(page); - BUG_ON(!PageLocked(page)); - BUG_ON(PageWriteback(page)); + BUG_ON(!folio_test_locked(folio)); + BUG_ON(folio_test_writeback(folio)); if (invalidate) { - if (page_mapped(page)) - clear_page_dirty_for_io(page); - block_invalidatepage(page, 0, PAGE_SIZE); - ClearPageUptodate(page); + if (folio_mapped(folio)) + folio_clear_dirty_for_io(folio); + block_invalidate_folio(folio, 0, + folio_size(folio)); + folio_clear_uptodate(folio); } - unlock_page(page); + folio_unlock(folio); } pagevec_release(&pvec); } @@ -3183,15 +3183,15 @@ static void ext4_readahead(struct readahead_control *rac) ext4_mpage_readpages(inode, rac, NULL); } -static void ext4_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +static void ext4_invalidate_folio(struct folio *folio, size_t offset, + size_t length) { - trace_ext4_invalidatepage(page, offset, length); + trace_ext4_invalidatepage(&folio->page, offset, length); /* No journalling happens on data buffers when this function is used */ - 
WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); + WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio))); - block_invalidatepage(page, offset, length); + block_invalidate_folio(folio, offset, length); } static int __ext4_journalled_invalidatepage(struct page *page, @@ -3583,7 +3583,7 @@ static const struct address_space_operations ext4_aops = { .write_end = ext4_write_end, .set_page_dirty = ext4_set_page_dirty, .bmap = ext4_bmap, - .invalidatepage = ext4_invalidatepage, + .invalidate_folio = ext4_invalidate_folio, .releasepage = ext4_releasepage, .direct_IO = noop_direct_IO, .migratepage = buffer_migrate_page, @@ -3618,7 +3618,7 @@ static const struct address_space_operations ext4_da_aops = { .write_end = ext4_da_write_end, .set_page_dirty = ext4_set_page_dirty, .bmap = ext4_bmap, - .invalidatepage = ext4_invalidatepage, + .invalidate_folio = ext4_invalidate_folio, .releasepage = ext4_releasepage, .direct_IO = noop_direct_IO, .migratepage = buffer_migrate_page, diff --git a/fs/fat/inode.c b/fs/fat/inode.c index a6f1c6d426d1..1e2f1e24a073 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -343,6 +343,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from) static const struct address_space_operations fat_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = fat_readpage, .readahead = fat_readahead, .writepage = fat_writepage, diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 72d30a682ece..d23c8b035447 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -90,12 +90,14 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb const struct address_space_operations gfs2_meta_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, }; const struct address_space_operations gfs2_rgrp_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, }; diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 2a5143246282..029d1869a224 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -160,6 +160,7 @@ static int hfs_writepages(struct address_space *mapping, const struct address_space_operations hfs_btree_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = hfs_readpage, .writepage = hfs_writepage, .write_begin = hfs_write_begin, @@ -170,6 +171,7 @@ const struct address_space_operations hfs_btree_aops = { const struct address_space_operations hfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = hfs_readpage, .writepage = hfs_writepage, .write_begin = hfs_write_begin, diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index d08a8d1d40a4..a91b9b5e92a8 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -157,6 +157,7 @@ static int hfsplus_writepages(struct address_space *mapping, const struct address_space_operations hfsplus_btree_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = hfsplus_readpage, .writepage = hfsplus_writepage, .write_begin = hfsplus_write_begin, @@ -167,6 +168,7 @@ const struct address_space_operations hfsplus_btree_aops = { const struct address_space_operations hfsplus_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = 
block_invalidate_folio, .readpage = hfsplus_readpage, .writepage = hfsplus_writepage, .write_begin = hfsplus_write_begin, diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index fb37f57130aa..cf68f5e76ddd 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -246,6 +246,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, const struct address_space_operations hpfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = hpfs_readpage, .writepage = hpfs_writepage, .readahead = hpfs_readahead, diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 57ab424c05ff..3950b3d610a0 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -358,6 +358,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) const struct address_space_operations jfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = jfs_readpage, .readahead = jfs_readahead, .writepage = jfs_writepage, diff --git a/fs/minix/inode.c b/fs/minix/inode.c index a71f1cf894b9..2295804d1893 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -443,6 +443,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block) static const struct address_space_operations minix_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = minix_readpage, .writepage = minix_writepage, .write_begin = minix_write_begin, diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index e3d807d5b83a..153f0569dcf2 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -304,7 +304,7 @@ const struct address_space_operations nilfs_aops = { .write_begin = nilfs_write_begin, .write_end = nilfs_write_end, /* .releasepage = nilfs_releasepage, */ - .invalidatepage = block_invalidatepage, + .invalidate_folio = block_invalidate_folio, .direct_IO = nilfs_direct_IO, .is_partially_uptodate = block_is_partially_uptodate, }; diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 4b3d33cf0041..72adca629bc9 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -435,6 +435,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) static const struct address_space_operations def_mdt_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .writepage = nilfs_mdt_write_page, }; diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index bb0a43860ad2..6858bf6df49a 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -1350,12 +1350,13 @@ retry_writepage: /* Is the page fully outside i_size? (truncate in progress) */ if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> PAGE_SHIFT)) { + struct folio *folio = page_folio(page); /* * The page may have dirty, unmapped buffers. Make them * freeable here, so the page does not leak. 
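 * (block_invalidate_folio() below takes folio_size() rather than a bare PAGE_SIZE, so this path stays correct for folios spanning more than one page.)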
*/ - block_invalidatepage(page, 0, PAGE_SIZE); - unlock_page(page); + block_invalidate_folio(folio, 0, folio_size(folio)); + folio_unlock(folio); ntfs_debug("Write outside i_size - truncated?"); return 0; } diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 498da317580a..b274061e22a7 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -2461,7 +2461,7 @@ const struct address_space_operations ocfs2_aops = { .write_end = ocfs2_write_end, .bmap = ocfs2_bmap, .direct_IO = ocfs2_direct_IO, - .invalidatepage = block_invalidatepage, + .invalidate_folio = block_invalidate_folio, .releasepage = ocfs2_releasepage, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 89725b15a64b..139d6a21dca1 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -373,6 +373,7 @@ const struct inode_operations omfs_file_inops = { const struct address_space_operations omfs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = omfs_readpage, .readahead = omfs_readahead, .writepage = omfs_writepage, diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 749385015a8d..d39984a1d4d3 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -496,6 +496,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations sysv_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = sysv_readpage, .writepage = sysv_writepage, .write_begin = sysv_write_begin, diff --git a/fs/udf/file.c b/fs/udf/file.c index 1baff8ddb754..a91011a7bb88 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -126,6 +126,7 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin const struct address_space_operations udf_adinicb_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = udf_adinicb_readpage, .writepage = udf_adinicb_writepage, .write_begin = udf_adinicb_write_begin, diff --git a/fs/udf/inode.c b/fs/udf/inode.c index ea8f6cd01f50..ab98c7aaf9f9 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -236,6 +236,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations udf_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = udf_readpage, .readahead = udf_readahead, .writepage = udf_writepage, diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index ac628de69601..2d005788c24d 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -527,6 +527,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations ufs_aops = { .set_page_dirty = __set_page_dirty_buffers, + .invalidate_folio = block_invalidate_folio, .readpage = ufs_readpage, .writepage = ufs_writepage, .write_begin = ufs_write_begin, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 79d465057889..9ee9d003d736 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -217,8 +217,7 @@ extern int buffer_heads_over_limit; * Generic address_space_operations implementations for buffer_head-backed * address_spaces. 
*/ -void block_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); +void block_invalidate_folio(struct folio *folio, size_t offset, size_t length); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); int __block_write_full_page(struct inode *inode, struct page *page, diff --git a/mm/truncate.c b/mm/truncate.c index b9ad298e6ce7..28650151091a 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -163,10 +163,6 @@ void folio_invalidate(struct folio *folio, size_t offset, size_t length) } invalidatepage = aops->invalidatepage; -#ifdef CONFIG_BLOCK - if (!invalidatepage) - invalidatepage = block_invalidatepage; -#endif if (invalidatepage) (*invalidatepage)(&folio->page, offset, length); } -- cgit v1.2.3-71-gd317 From 5660a8630dab61a28e07ec00c42bf605b182d725 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:35 +0000 Subject: fs: Remove noop_invalidatepage() We used to have to use noop_invalidatepage() to prevent block_invalidatepage() from being called, but that behaviour is now gone. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- drivers/dax/device.c | 1 - fs/ext2/inode.c | 1 - fs/ext4/inode.c | 1 - fs/fuse/dax.c | 1 - fs/libfs.c | 11 ----------- fs/xfs/xfs_aops.c | 1 - include/linux/fs.h | 2 -- 7 files changed, 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/dax/device.c b/drivers/dax/device.c index d33a0613ed0c..7a59ca51217e 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -347,7 +347,6 @@ static unsigned long dax_get_unmapped_area(struct file *filp, static const struct address_space_operations dev_dax_aops = { .set_page_dirty = __set_page_dirty_no_writeback, - .invalidatepage = noop_invalidatepage, }; static int dax_open(struct inode *inode, struct file *filp) diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 1e14777c3ca6..9b579ee56eaf 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1001,7 +1001,6 @@ static const struct address_space_operations ext2_dax_aops = { .writepages = ext2_dax_writepages, .direct_IO = noop_direct_IO, .set_page_dirty = __set_page_dirty_no_writeback, - .invalidatepage = noop_invalidatepage, }; /* diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 07ef3f84db9e..d7086209572a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3632,7 +3632,6 @@ static const struct address_space_operations ext4_dax_aops = { .direct_IO = noop_direct_IO, .set_page_dirty = __set_page_dirty_no_writeback, .bmap = ext4_bmap, - .invalidatepage = noop_invalidatepage, .swap_activate = ext4_iomap_swap_activate, }; diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 182b24a14804..b11fa10b88d8 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -1327,7 +1327,6 @@ static const struct address_space_operations fuse_dax_file_aops = { .writepages = fuse_dax_writepages, .direct_IO = noop_direct_IO, .set_page_dirty = __set_page_dirty_no_writeback, - .invalidatepage = noop_invalidatepage, }; static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags) diff --git a/fs/libfs.c b/fs/libfs.c index 974125270a42..4e047841e61d 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -1198,17 +1198,6 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync) } EXPORT_SYMBOL(noop_fsync); -void noop_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) -{ - /* - * There is no page cache to invalidate in the dax 
case, however - * we need this callback defined to prevent falling back to - * block_invalidatepage() in do_invalidatepage(). - */ -} -EXPORT_SYMBOL_GPL(noop_invalidatepage); - ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { /* diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 51a040b658cb..7dd314f2288f 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -582,6 +582,5 @@ const struct address_space_operations xfs_dax_aops = { .writepages = xfs_dax_writepages, .direct_IO = noop_direct_IO, .set_page_dirty = __set_page_dirty_no_writeback, - .invalidatepage = noop_invalidatepage, .swap_activate = xfs_iomap_swapfile_activate, }; diff --git a/include/linux/fs.h b/include/linux/fs.h index bcdb613cd652..a40ea82248da 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3323,8 +3323,6 @@ extern int simple_rename(struct user_namespace *, struct inode *, extern void simple_recursive_removal(struct dentry *, void (*callback)(struct dentry *)); extern int noop_fsync(struct file *, loff_t, loff_t, int); -extern void noop_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); extern int simple_write_begin(struct file *file, struct address_space *mapping, -- cgit v1.2.3-71-gd317 From ccd16945dba091fdf1036d7711b9f6cbd287ae28 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:43 +0000 Subject: ext4: Convert invalidatepage to invalidate_folio Extensive changes, but fairly mechanical. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- fs/ext4/inode.c | 56 ++++++++++++++++++++++----------------------- fs/jbd2/journal.c | 2 +- fs/jbd2/transaction.c | 31 ++++++++++++------------- include/linux/jbd2.h | 4 ++-- include/linux/pagemap.h | 18 +++++++++++++++ include/trace/events/ext4.h | 30 ++++++++++++------------ 6 files changed, 78 insertions(+), 63 deletions(-) (limited to 'include/linux') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index d7086209572a..678ba122f8b1 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -184,7 +184,7 @@ void ext4_evict_inode(struct inode *inode) * journal. So although mm thinks everything is clean and * ready for reaping the inode might still have some pages to * write in the running transaction or waiting to be - * checkpointed. Thus calling jbd2_journal_invalidatepage() + * checkpointed. Thus calling jbd2_journal_invalidate_folio() * (via truncate_inode_pages()) to discard these buffers can * cause data loss. 
Also even if we did not discard these * buffers, we would have no way to find them after the inode @@ -3186,7 +3186,7 @@ static void ext4_readahead(struct readahead_control *rac) static void ext4_invalidate_folio(struct folio *folio, size_t offset, size_t length) { - trace_ext4_invalidatepage(&folio->page, offset, length); + trace_ext4_invalidate_folio(folio, offset, length); /* No journalling happens on data buffers when this function is used */ WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio))); @@ -3194,29 +3194,28 @@ static void ext4_invalidate_folio(struct folio *folio, size_t offset, block_invalidate_folio(folio, offset, length); } -static int __ext4_journalled_invalidatepage(struct page *page, - unsigned int offset, - unsigned int length) +static int __ext4_journalled_invalidate_folio(struct folio *folio, + size_t offset, size_t length) { - journal_t *journal = EXT4_JOURNAL(page->mapping->host); + journal_t *journal = EXT4_JOURNAL(folio->mapping->host); - trace_ext4_journalled_invalidatepage(page, offset, length); + trace_ext4_journalled_invalidate_folio(folio, offset, length); /* * If it's a full truncate we just forget about the pending dirtying */ - if (offset == 0 && length == PAGE_SIZE) - ClearPageChecked(page); + if (offset == 0 && length == folio_size(folio)) + folio_clear_checked(folio); - return jbd2_journal_invalidatepage(journal, page, offset, length); + return jbd2_journal_invalidate_folio(journal, folio, offset, length); } /* Wrapper for aops... */ -static void ext4_journalled_invalidatepage(struct page *page, - unsigned int offset, - unsigned int length) +static void ext4_journalled_invalidate_folio(struct folio *folio, + size_t offset, + size_t length) { - WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); + WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0); } static int ext4_releasepage(struct page *page, gfp_t wait) @@ -3601,7 +3600,7 @@ static const struct address_space_operations ext4_journalled_aops = { .write_end = ext4_journalled_write_end, .set_page_dirty = ext4_journalled_set_page_dirty, .bmap = ext4_bmap, - .invalidatepage = ext4_journalled_invalidatepage, + .invalidate_folio = ext4_journalled_invalidate_folio, .releasepage = ext4_releasepage, .direct_IO = noop_direct_IO, .is_partially_uptodate = block_is_partially_uptodate, @@ -5204,13 +5203,12 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) } /* - * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate - * buffers that are attached to a page stradding i_size and are undergoing + * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate + * buffers that are attached to a folio straddling i_size and are undergoing * commit. In that case we have to wait for commit to finish and try again. */ static void ext4_wait_for_tail_page_commit(struct inode *inode) { - struct page *page; unsigned offset; journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; tid_t commit_tid = 0; @@ -5218,25 +5216,25 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode) offset = inode->i_size & (PAGE_SIZE - 1); /* - * If the page is fully truncated, we don't need to wait for any commit - * (and we even should not as __ext4_journalled_invalidatepage() may - * strip all buffers from the page but keep the page dirty which can then - * confuse e.g. 
concurrent ext4_writepage() seeing dirty page without + * If the folio is fully truncated, we don't need to wait for any commit + * (and we even should not as __ext4_journalled_invalidate_folio() may + * strip all buffers from the folio but keep the folio dirty which can then + * confuse e.g. concurrent ext4_writepage() seeing dirty folio without * buffers). Also we don't need to wait for any commit if all buffers in - * the page remain valid. This is most beneficial for the common case of + * the folio remain valid. This is most beneficial for the common case of * blocksize == PAGESIZE. */ if (!offset || offset > (PAGE_SIZE - i_blocksize(inode))) return; while (1) { - page = find_lock_page(inode->i_mapping, + struct folio *folio = filemap_lock_folio(inode->i_mapping, inode->i_size >> PAGE_SHIFT); - if (!page) + if (!folio) return; - ret = __ext4_journalled_invalidatepage(page, offset, - PAGE_SIZE - offset); - unlock_page(page); - put_page(page); + ret = __ext4_journalled_invalidate_folio(folio, offset, + folio_size(folio) - offset); + folio_unlock(folio); + folio_put(folio); if (ret != -EBUSY) return; commit_tid = 0; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index c2cf74b01ddb..fcacafa4510d 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -86,7 +86,7 @@ EXPORT_SYMBOL(jbd2_journal_start_commit); EXPORT_SYMBOL(jbd2_journal_force_commit_nested); EXPORT_SYMBOL(jbd2_journal_wipe); EXPORT_SYMBOL(jbd2_journal_blocks_per_page); -EXPORT_SYMBOL(jbd2_journal_invalidatepage); +EXPORT_SYMBOL(jbd2_journal_invalidate_folio); EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers); EXPORT_SYMBOL(jbd2_journal_force_commit); EXPORT_SYMBOL(jbd2_journal_inode_ranged_write); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 8e2f8275a253..d988016034e2 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2219,14 +2219,14 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) } /* - * jbd2_journal_invalidatepage + * jbd2_journal_invalidate_folio * * This code is tricky. It has a number of cases to deal with. * * There are two invariants which this code relies on: * - * i_size must be updated on disk before we start calling invalidatepage on the - * data. + * i_size must be updated on disk before we start calling invalidate_folio + * on the data. * * This is done in ext3 by defining an ext3_setattr method which * updates i_size before truncate gets going. By maintaining this @@ -2428,9 +2428,9 @@ zap_buffer_unlocked: } /** - * jbd2_journal_invalidatepage() + * jbd2_journal_invalidate_folio() * @journal: journal to use for flush... - * @page: page to flush + * @folio: folio to flush * @offset: start of the range to invalidate * @length: length of the range to invalidate * @@ -2439,30 +2439,29 @@ zap_buffer_unlocked: * the page is straddling i_size. Caller then has to wait for current commit * and try again. 
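 * (A caller is visible above: __ext4_journalled_invalidate_folio() passes its folio straight here, and ext4's aops wrapper WARNs if the call fails.)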
*/ -int jbd2_journal_invalidatepage(journal_t *journal, - struct page *page, - unsigned int offset, - unsigned int length) +int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio, + size_t offset, size_t length) { struct buffer_head *head, *bh, *next; unsigned int stop = offset + length; unsigned int curr_off = 0; - int partial_page = (offset || length < PAGE_SIZE); + int partial_page = (offset || length < folio_size(folio)); int may_free = 1; int ret = 0; - if (!PageLocked(page)) + if (!folio_test_locked(folio)) BUG(); - if (!page_has_buffers(page)) + head = folio_buffers(folio); + if (!head) return 0; - BUG_ON(stop > PAGE_SIZE || stop < length); + BUG_ON(stop > folio_size(folio) || stop < length); /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be * cautious in our locking. */ - head = bh = page_buffers(page); + bh = head; do { unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; @@ -2485,8 +2484,8 @@ int jbd2_journal_invalidatepage(journal_t *journal, } while (bh != head); if (!partial_page) { - if (may_free && try_to_free_buffers(page)) - J_ASSERT(!page_has_buffers(page)); + if (may_free && try_to_free_buffers(&folio->page)) + J_ASSERT(!folio_buffers(folio)); } return 0; } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 9c3ada74ffb1..1b9d1e205a2f 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1530,8 +1530,8 @@ void jbd2_journal_set_triggers(struct buffer_head *, struct jbd2_buffer_trigger_type *type); extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); -extern int jbd2_journal_invalidatepage(journal_t *, - struct page *, unsigned int, unsigned int); +int jbd2_journal_invalidate_folio(journal_t *, struct folio *, + size_t offset, size_t length); extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page); extern int jbd2_journal_stop(handle_t *); extern int jbd2_journal_flush(journal_t *journal, unsigned int flags); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 4503d5baa252..6a9617e9c6bc 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -422,6 +422,24 @@ static inline struct folio *filemap_get_folio(struct address_space *mapping, return __filemap_get_folio(mapping, index, 0, 0); } +/** + * filemap_lock_folio - Find and lock a folio. + * @mapping: The address_space to search. + * @index: The page index. + * + * Looks up the page cache entry at @mapping & @index. If a folio is + * present, it is returned locked with an increased refcount. + * + * Context: May sleep. + * Return: A folio or %NULL if there is no folio in the cache for this + * index. Will not return a shadow, swap or DAX entry. 
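+ * + * A typical caller (see ext4_wait_for_tail_page_commit() above) must both folio_unlock() and folio_put() any folio returned here.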
+ */ +static inline struct folio *filemap_lock_folio(struct address_space *mapping, + pgoff_t index) +{ + return __filemap_get_folio(mapping, index, FGP_LOCK, 0); +} + /** * find_get_page - find and get a page reference * @mapping: the address_space to search diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 19e957b7f941..40cca0e5a811 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -597,44 +597,44 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage, TP_ARGS(page) ); -DECLARE_EVENT_CLASS(ext4_invalidatepage_op, - TP_PROTO(struct page *page, unsigned int offset, unsigned int length), +DECLARE_EVENT_CLASS(ext4_invalidate_folio_op, + TP_PROTO(struct folio *folio, size_t offset, size_t length), - TP_ARGS(page, offset, length), + TP_ARGS(folio, offset, length), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field( pgoff_t, index ) - __field( unsigned int, offset ) - __field( unsigned int, length ) + __field( size_t, offset ) + __field( size_t, length ) ), TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->ino = page->mapping->host->i_ino; - __entry->index = page->index; + __entry->dev = folio->mapping->host->i_sb->s_dev; + __entry->ino = folio->mapping->host->i_ino; + __entry->index = folio->index; __entry->offset = offset; __entry->length = length; ), - TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u", + TP_printk("dev %d,%d ino %lu folio_index %lu offset %zu length %zu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned long) __entry->index, __entry->offset, __entry->length) ); -DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage, - TP_PROTO(struct page *page, unsigned int offset, unsigned int length), +DEFINE_EVENT(ext4_invalidate_folio_op, ext4_invalidate_folio, + TP_PROTO(struct folio *folio, size_t offset, size_t length), - TP_ARGS(page, offset, length) + TP_ARGS(folio, offset, length) ); -DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage, - TP_PROTO(struct page *page, unsigned int offset, unsigned int length), +DEFINE_EVENT(ext4_invalidate_folio_op, ext4_journalled_invalidate_folio, + TP_PROTO(struct folio *folio, size_t offset, size_t length), - TP_ARGS(page, offset, length) + TP_ARGS(folio, offset, length) ); TRACE_EVENT(ext4_discard_blocks, -- cgit v1.2.3-71-gd317 From 6d740c76ea86053208b20c41fb5ec1de07acb996 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:47 +0000 Subject: nfs: Convert from invalidatepage to invalidate_folio Print the folio index instead of the pointer, since this is more useful. We also don't need to use page_file_mapping() as we do not invalidate swapcache pages. Since this is the only caller of nfs_wb_page_cancel(), convert it to nfs_wb_folio_cancel(). 
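A sketch of the shape this produces (hypothetical "foofs"; the real NFS conversion is in the diff that follows): partial invalidations are ignored, and only a whole-folio invalidation waits out fscache I/O:

#include <linux/netfs.h>
#include <linux/pagemap.h>

static void foofs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	/* Only act when the entire folio is being invalidated. */
	if (offset != 0 || length < folio_size(folio))
		return;

	/* Wait for any fscache I/O against this folio to finish. */
	folio_wait_fscache(folio);
}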
Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- fs/nfs/file.c | 16 ++++++++-------- fs/nfs/write.c | 8 ++++---- include/linux/nfs_fs.h | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 76d76acbc594..79664f04d74b 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -406,17 +406,17 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, * - Called if either PG_private or PG_fscache is set on the page * - Caller holds page lock */ -static void nfs_invalidate_page(struct page *page, unsigned int offset, - unsigned int length) +static void nfs_invalidate_folio(struct folio *folio, size_t offset, + size_t length) { - dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n", - page, offset, length); + dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n", + folio->index, offset, length); - if (offset != 0 || length < PAGE_SIZE) + if (offset != 0 || length < folio_size(folio)) return; /* Cancel any unstarted writes on this page */ - nfs_wb_page_cancel(page_file_mapping(page)->host, page); - wait_on_page_fscache(page); + nfs_wb_folio_cancel(folio->mapping->host, folio); + folio_wait_fscache(folio); } /* @@ -520,7 +520,7 @@ const struct address_space_operations nfs_file_aops = { .writepages = nfs_writepages, .write_begin = nfs_write_begin, .write_end = nfs_write_end, - .invalidatepage = nfs_invalidate_page, + .invalidate_folio = nfs_invalidate_folio, .releasepage = nfs_release_page, .direct_IO = nfs_direct_IO, #ifdef CONFIG_MIGRATION diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 987a187bd39a..58746afb97ab 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -2049,21 +2049,21 @@ out: } EXPORT_SYMBOL_GPL(nfs_wb_all); -int nfs_wb_page_cancel(struct inode *inode, struct page *page) +int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio) { struct nfs_page *req; int ret = 0; - wait_on_page_writeback(page); + folio_wait_writeback(folio); /* blocking call to cancel all requests and join to a single (head) * request */ - req = nfs_lock_and_join_requests(page); + req = nfs_lock_and_join_requests(&folio->page); if (IS_ERR(req)) { ret = PTR_ERR(req); } else if (req) { - /* all requests from this page have been cancelled by + /* all requests from this folio have been cancelled by * nfs_lock_and_join_requests, so just remove the head * request from the inode / page_private pointer and * release it */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 68f81d8d36de..784120cc217e 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -583,7 +583,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned extern int nfs_sync_inode(struct inode *inode); extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page *page); -extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); +int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); extern void nfs_commit_free(struct nfs_commit_data *data); -- cgit v1.2.3-71-gd317 From f50015a596fa106bf642bd85fbf6e6b52cc913d0 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:51 +0000 Subject: fs: Remove aops->invalidatepage With all users migrated to ->invalidate_folio, remove the 
old operation. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- Documentation/filesystems/locking.rst | 2 -- Documentation/filesystems/vfs.rst | 1 - include/linux/fs.h | 1 - mm/truncate.c | 14 +++----------- 4 files changed, 3 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 29a045fd3860..8e9cbc0fb70f 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -251,7 +251,6 @@ prototypes:: struct page *page, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t start, size_t len); - void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, int); void (*freepage)(struct page *); int (*direct_IO)(struct kiocb *, struct iov_iter *iter); @@ -280,7 +279,6 @@ write_begin: locks the page exclusive write_end: yes, unlocks exclusive bmap: invalidate_folio: yes exclusive -invalidatepage: yes exclusive releasepage: yes freepage: yes direct_IO: diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 26c090cd8cf5..28704831652c 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -736,7 +736,6 @@ cache in your filesystem. The following members are defined: struct page *page, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t start, size_t len); - void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, int); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); diff --git a/include/linux/fs.h b/include/linux/fs.h index a40ea82248da..af9ae091bd82 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -388,7 +388,6 @@ struct address_space_operations { /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t offset, size_t len); - void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); diff --git a/mm/truncate.c b/mm/truncate.c index 28650151091a..8010461a59bd 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -19,8 +19,7 @@ #include #include #include -#include /* grr. try_to_release_page, - do_invalidatepage */ +#include /* grr. 
try_to_release_page */ #include #include #include "internal.h" @@ -155,16 +154,9 @@ static int invalidate_exceptional_entry2(struct address_space *mapping, void folio_invalidate(struct folio *folio, size_t offset, size_t length) { const struct address_space_operations *aops = folio->mapping->a_ops; - void (*invalidatepage)(struct page *, unsigned int, unsigned int); - if (aops->invalidate_folio) { + if (aops->invalidate_folio) aops->invalidate_folio(folio, offset, length); - return; - } - - invalidatepage = aops->invalidatepage; - if (invalidatepage) - (*invalidatepage)(&folio->page, offset, length); } EXPORT_SYMBOL_GPL(folio_invalidate); @@ -334,7 +326,7 @@ int invalidate_inode_page(struct page *page) * mapping is large, it is probably the case that the final pages are the most * recently touched, and freeing happens in ascending file offset order. * - * Note that since ->invalidatepage() accepts range to invalidate + * Note that since ->invalidate_folio() accepts range to invalidate * truncate_inode_pages_range is able to handle cases where lend + 1 is not * page aligned properly. */ -- cgit v1.2.3-71-gd317 From affa80e8c6a1df473694c2087259901872309cc4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:52 +0000 Subject: fs: Add aops->launder_folio Since the only difference between ->launder_page and ->launder_folio is the type of the pointer, these can safely use a union without affecting bisectability. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- Documentation/filesystems/locking.rst | 10 +++++----- Documentation/filesystems/vfs.rst | 8 ++++---- include/linux/fs.h | 5 ++++- mm/truncate.c | 8 ++++---- 4 files changed, 17 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 8e9cbc0fb70f..dee512efb458 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -257,7 +257,7 @@ prototypes:: bool (*isolate_page) (struct page *, isolate_mode_t); int (*migratepage)(struct address_space *, struct page *, struct page *); void (*putback_page) (struct page *); - int (*launder_page)(struct page *); + int (*launder_folio)(struct folio *); bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct file *); @@ -285,7 +285,7 @@ direct_IO: isolate_page: yes migratepage: yes (both) putback_page: yes -launder_page: yes +launder_folio: yes is_partially_uptodate: yes error_remove_page: yes swap_activate: no @@ -385,9 +385,9 @@ the kernel assumes that the fs has no private interest in the buffers. ->freepage() is called when the kernel is done dropping the page from the page cache. -->launder_page() may be called prior to releasing a page if -it is still found to be dirty. It returns zero if the page was successfully -cleaned, or an error value if not. Note that in order to prevent the page +->launder_folio() may be called prior to releasing a folio if +it is still found to be dirty. It returns zero if the folio was successfully +cleaned, or an error value if not. Note that in order to prevent the folio getting mapped back in and redirtied, it needs to be kept locked across the entire operation. 
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 28704831652c..c54ca4d88ed6 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -745,7 +745,7 @@ cache in your filesystem. The following members are defined: int (*migratepage) (struct page *, struct page *); /* put migration-failed page back to right list */ void (*putback_page) (struct page *); - int (*launder_page) (struct page *); + int (*launder_folio) (struct folio *); bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); @@ -930,9 +930,9 @@ cache in your filesystem. The following members are defined: ``putback_page`` Called by the VM when isolated page's migration fails. -``launder_page`` - Called before freeing a page - it writes back the dirty page. - To prevent redirtying the page, it is kept locked during the +``launder_folio`` + Called before freeing a folio - it writes back the dirty folio. + To prevent redirtying the folio, it is kept locked during the whole operation. ``is_partially_uptodate`` diff --git a/include/linux/fs.h b/include/linux/fs.h index af9ae091bd82..0af3075cdff2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -399,7 +399,10 @@ struct address_space_operations { struct page *, struct page *, enum migrate_mode); bool (*isolate_page)(struct page *, isolate_mode_t); void (*putback_page)(struct page *); - int (*launder_page) (struct page *); + union { + int (*launder_page) (struct page *); + int (*launder_folio) (struct folio *); + }; bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); void (*is_dirty_writeback) (struct page *, bool *, bool *); diff --git a/mm/truncate.c b/mm/truncate.c index 8010461a59bd..6ad44b546dff 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -614,13 +614,13 @@ failed: return 0; } -static int do_launder_folio(struct address_space *mapping, struct folio *folio) +static int folio_launder(struct address_space *mapping, struct folio *folio) { if (!folio_test_dirty(folio)) return 0; - if (folio->mapping != mapping || mapping->a_ops->launder_page == NULL) + if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) return 0; - return mapping->a_ops->launder_page(&folio->page); + return mapping->a_ops->launder_folio(folio); } /** @@ -686,7 +686,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, unmap_mapping_folio(folio); BUG_ON(folio_mapped(folio)); - ret2 = do_launder_folio(mapping, folio); + ret2 = folio_launder(mapping, folio); if (ret2 == 0) { if (!invalidate_complete_folio2(mapping, folio)) ret2 = -EBUSY; -- cgit v1.2.3-71-gd317 From 072acba6d08730beba5bad293c7ce6d0c4b0624c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:21:59 +0000 Subject: fs: Remove aops->launder_page With all users converted to ->launder_folio, remove ->launder_page. 
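For filesystems implementing the new hook, one plausible minimal shape of ->launder_folio is sketched below, following the locking rules documented above: the folio arrives locked, is written back if dirty, and stays locked for the whole operation. "example_write_folio" stands in for the filesystem's own synchronous writeback and is not a real kernel function:

	static int example_launder_folio(struct folio *folio)
	{
		/* Folio is locked on entry and must remain locked throughout. */
		if (!folio_clear_dirty_for_io(folio))
			return 0;

		/* Write the folio back synchronously; return 0 on success. */
		return example_write_folio(folio);
	}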
Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- include/linux/fs.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 0af3075cdff2..055be40084f1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -399,10 +399,7 @@ struct address_space_operations { struct page *, struct page *, enum migrate_mode); bool (*isolate_page)(struct page *, isolate_mode_t); void (*putback_page)(struct page *); - union { - int (*launder_page) (struct page *); - int (*launder_folio) (struct folio *); - }; + int (*launder_folio)(struct folio *); bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); void (*is_dirty_writeback) (struct page *, bool *, bool *); -- cgit v1.2.3-71-gd317 From 6f31a5a261dbbe7bf7f585dfe81f8acd4b25ec3b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:22:00 +0000 Subject: fs: Add aops->dirty_folio This replaces ->set_page_dirty(). It returns a bool instead of an int and takes the address_space as a parameter instead of expecting the implementations to retrieve the address_space from the page. This is particularly important for filesystems which use FS_OPS for swap. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- Documentation/filesystems/locking.rst | 15 ++++++++------- Documentation/filesystems/vfs.rst | 16 ++++++++-------- include/linux/fs.h | 1 + mm/page-writeback.c | 17 ++++++++++------- mm/page_io.c | 5 ++++- 5 files changed, 31 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index dee512efb458..72fa12dabd39 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -239,7 +239,7 @@ prototypes:: int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); - int (*set_page_dirty)(struct page *page); + bool (*dirty_folio)(struct address_space *, struct folio *folio); void (*readahead)(struct readahead_control *); int (*readpages)(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages); @@ -264,7 +264,7 @@ prototypes:: int (*swap_deactivate)(struct file *); locking rules: - All except set_page_dirty and freepage may block + All except dirty_folio and freepage may block ====================== ======================== ========= =============== ops PageLocked(page) i_rwsem invalidate_lock @@ -272,7 +272,7 @@ ops PageLocked(page) i_rwsem invalidate_lock writepage: yes, unlocks (see below) readpage: yes, unlocks shared writepages: -set_page_dirty no +dirty_folio maybe readahead: yes, unlocks shared readpages: no shared write_begin: locks the page exclusive @@ -361,10 +361,11 @@ If nr_to_write is NULL, all dirty pages must be written. writepages should _only_ write pages which are present on mapping->io_pages. -->set_page_dirty() is called from various places in the kernel -when the target page is marked as needing writeback. It may be called -under spinlock (it cannot block) and is sometimes called with the page -not locked. 
+->dirty_folio() is called from various places in the kernel when +the target folio is marked as needing writeback. The folio cannot be +truncated because either the caller holds the folio lock, or the caller +has found the folio while holding the page table lock which will block +truncation. ->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some filesystems and by the swapper. The latter will eventually go away. Please, diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index c54ca4d88ed6..d16bee420326 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -658,7 +658,7 @@ pages, however the address_space has finer control of write sizes. The read process essentially only requires 'readpage'. The write process is more complicated and uses write_begin/write_end or -set_page_dirty to write data into the address_space, and writepage and +dirty_folio to write data into the address_space, and writepage and writepages to writeback data to storage. Adding and removing pages to/from an address_space is protected by the @@ -724,7 +724,7 @@ cache in your filesystem. The following members are defined: int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); - int (*set_page_dirty)(struct page *page); + bool (*dirty_folio)(struct address_space *, struct folio *); void (*readahead)(struct readahead_control *); int (*readpages)(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages); @@ -793,13 +793,13 @@ cache in your filesystem. The following members are defined: This will choose pages from the address space that are tagged as DIRTY and will pass them to ->writepage. -``set_page_dirty`` - called by the VM to set a page dirty. This is particularly - needed if an address space attaches private data to a page, and - that data needs to be updated when a page is dirtied. This is +``dirty_folio`` + called by the VM to mark a folio as dirty. This is particularly + needed if an address space attaches private data to a folio, and + that data needs to be updated when a folio is dirtied. This is called, for example, when a memory mapped page gets modified. - If defined, it should set the PageDirty flag, and the - PAGECACHE_TAG_DIRTY tag in the radix tree. + If defined, it should set the folio dirty flag, and the + PAGECACHE_TAG_DIRTY search mark in i_pages. ``readahead`` Called by the VM to read pages associated with the address_space diff --git a/include/linux/fs.h b/include/linux/fs.h index 055be40084f1..c3d5db8851ae 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -369,6 +369,7 @@ struct address_space_operations { /* Set a page dirty. Return true if this dirtied it */ int (*set_page_dirty)(struct page *page); + bool (*dirty_folio)(struct address_space *, struct folio *); /* * Reads in the requested pages. Unlike ->readpage(), this is diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 91d163f8d36b..27a87ae4502c 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2616,7 +2616,7 @@ EXPORT_SYMBOL(folio_redirty_for_writepage); * folio_mark_dirty - Mark a folio as being modified. * @folio: The folio. * - * For folios with a mapping this should be done under the page lock + * For folios with a mapping this should be done with the folio lock held * for the benefit of asynchronous memory errors who prefer a consistent * dirty state. 
This rule can be broken in some special cases, * but should be better not to. @@ -2630,16 +2630,19 @@ bool folio_mark_dirty(struct folio *folio) if (likely(mapping)) { /* * readahead/lru_deactivate_page could remain - * PG_readahead/PG_reclaim due to race with end_page_writeback - * About readahead, if the page is written, the flags would be + * PG_readahead/PG_reclaim due to race with folio_end_writeback + * About readahead, if the folio is written, the flags would be * reset. So no problem. - * About lru_deactivate_page, if the page is redirty, the flag - * will be reset. So no problem. but if the page is used by readahead - * it will confuse readahead and make it restart the size rampup - * process. But it's a trivial problem. + * About lru_deactivate_page, if the folio is redirtied, + * the flag will be reset. So no problem. but if the + * folio is used by readahead it will confuse readahead + * and make it restart the size rampup process. But it's + * a trivial problem. */ if (folio_test_reclaim(folio)) folio_clear_reclaim(folio); + if (mapping->a_ops->dirty_folio) + return mapping->a_ops->dirty_folio(mapping, folio); return mapping->a_ops->set_page_dirty(&folio->page); } if (!folio_test_dirty(folio)) { diff --git a/mm/page_io.c b/mm/page_io.c index 0bf8e40f4e57..24c975fb4e21 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -444,9 +444,12 @@ int swap_set_page_dirty(struct page *page) if (data_race(sis->flags & SWP_FS_OPS)) { struct address_space *mapping = sis->swap_file->f_mapping; + const struct address_space_operations *aops = mapping->a_ops; VM_BUG_ON_PAGE(!PageSwapCache(page), page); - return mapping->a_ops->set_page_dirty(page); + if (aops->dirty_folio) + return aops->dirty_folio(mapping, page_folio(page)); + return aops->set_page_dirty(page); } else { return __set_page_dirty_no_writeback(page); } -- cgit v1.2.3-71-gd317 From 8fb72b4a76933ae6f86725cc8e4a8190ba84d755 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:22:01 +0000 Subject: fscache: Convert fscache_set_page_dirty() to fscache_dirty_folio() Convert all users of fscache_set_page_dirty to use fscache_dirty_folio. 
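Each conversion below follows the same pattern; abstracted, a netfs wires up its dirty_folio roughly as in the sketch below, where the "example_" names are placeholders and only the per-filesystem cookie lookup varies. When fscache is compiled out, plain filemap_dirty_folio() is used instead:

	#ifdef CONFIG_EXAMPLE_FSCACHE
	static bool example_dirty_folio(struct address_space *mapping,
					struct folio *folio)
	{
		/* Mark the folio dirty and pin the cache object for writeback. */
		return fscache_dirty_folio(mapping, folio,
					   example_inode_cookie(mapping->host));
	}
	#else
	#define example_dirty_folio filemap_dirty_folio
	#endif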
Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- Documentation/filesystems/caching/netfs-api.rst | 7 ++++--- fs/9p/vfs_addr.c | 10 ++++----- fs/afs/file.c | 2 +- fs/afs/internal.h | 4 ++-- fs/afs/write.c | 5 +++-- fs/ceph/addr.c | 27 ++++++++++++------------ fs/ceph/cache.h | 13 ++++++------ fs/cifs/file.c | 11 +++++----- fs/fscache/io.c | 28 +++++++++++++------------ include/linux/fscache.h | 8 ++++--- 10 files changed, 61 insertions(+), 54 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/caching/netfs-api.rst b/Documentation/filesystems/caching/netfs-api.rst index f84e9ffdf0b4..5066113acad5 100644 --- a/Documentation/filesystems/caching/netfs-api.rst +++ b/Documentation/filesystems/caching/netfs-api.rst @@ -345,8 +345,9 @@ The following facilities are provided to manage this: To support this, the following functions are provided:: - int fscache_set_page_dirty(struct page *page, - struct fscache_cookie *cookie); + bool fscache_dirty_folio(struct address_space *mapping, + struct folio *folio, + struct fscache_cookie *cookie); void fscache_unpin_writeback(struct writeback_control *wbc, struct fscache_cookie *cookie); void fscache_clear_inode_writeback(struct fscache_cookie *cookie, @@ -354,7 +355,7 @@ To support this, the following functions are provided:: const void *aux); The *set* function is intended to be called from the filesystem's -``set_page_dirty`` address space operation. If ``I_PINNING_FSCACHE_WB`` is not +``dirty_folio`` address space operation. If ``I_PINNING_FSCACHE_WB`` is not set, it sets that flag and increments the use count on the cookie (the caller must already have called ``fscache_use_cookie()``). diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index a4a9075890d5..76956c9d2af9 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -359,20 +359,20 @@ out: * Mark a page as having been made dirty and thus needing writeback. We also * need to pin the cache object to write back to. 
*/ -static int v9fs_set_page_dirty(struct page *page) +static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio) { - struct v9fs_inode *v9inode = V9FS_I(page->mapping->host); + struct v9fs_inode *v9inode = V9FS_I(mapping->host); - return fscache_set_page_dirty(page, v9fs_inode_cookie(v9inode)); + return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode)); } #else -#define v9fs_set_page_dirty __set_page_dirty_nobuffers +#define v9fs_dirty_folio filemap_dirty_folio #endif const struct address_space_operations v9fs_addr_operations = { .readpage = v9fs_vfs_readpage, .readahead = v9fs_vfs_readahead, - .set_page_dirty = v9fs_set_page_dirty, + .dirty_folio = v9fs_dirty_folio, .writepage = v9fs_vfs_writepage, .write_begin = v9fs_write_begin, .write_end = v9fs_write_end, diff --git a/fs/afs/file.c b/fs/afs/file.c index 56b20b922751..0f9fdb284a20 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -54,7 +54,7 @@ const struct inode_operations afs_file_inode_operations = { const struct address_space_operations afs_file_aops = { .readpage = afs_readpage, .readahead = afs_readahead, - .set_page_dirty = afs_set_page_dirty, + .dirty_folio = afs_dirty_folio, .launder_folio = afs_launder_folio, .releasepage = afs_releasepage, .invalidate_folio = afs_invalidate_folio, diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 4023d8e6ab30..dc5032e10244 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -1521,9 +1521,9 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *); * write.c */ #ifdef CONFIG_AFS_FSCACHE -extern int afs_set_page_dirty(struct page *); +bool afs_dirty_folio(struct address_space *, struct folio *); #else -#define afs_set_page_dirty __set_page_dirty_nobuffers +#define afs_dirty_folio filemap_dirty_folio #endif extern int afs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, diff --git a/fs/afs/write.c b/fs/afs/write.c index 5864411bd006..88861613734e 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -22,9 +22,10 @@ static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len * Mark a page as having been made dirty and thus needing writeback. We also * need to pin the cache object to write back to. */ -int afs_set_page_dirty(struct page *page) +bool afs_dirty_folio(struct address_space *mapping, struct folio *folio) { - return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host))); + return fscache_dirty_folio(mapping, folio, + afs_vnode_cache(AFS_FS_I(mapping->host))); } static void afs_folio_start_fscache(bool caching, struct folio *folio) { diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 09fd7a02586c..f40c34f4f526 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -76,18 +76,17 @@ static inline struct ceph_snap_context *page_snap_context(struct page *page) * Dirty a page. Optimistically adjust accounting, on the assumption * that we won't race with invalidate. If we do, readjust. 
*/ -static int ceph_set_page_dirty(struct page *page) +static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio) { - struct address_space *mapping = page->mapping; struct inode *inode; struct ceph_inode_info *ci; struct ceph_snap_context *snapc; - if (PageDirty(page)) { - dout("%p set_page_dirty %p idx %lu -- already dirty\n", - mapping->host, page, page->index); - BUG_ON(!PagePrivate(page)); - return 0; + if (folio_test_dirty(folio)) { + dout("%p dirty_folio %p idx %lu -- already dirty\n", + mapping->host, folio, folio->index); + BUG_ON(!folio_get_private(folio)); + return false; } inode = mapping->host; @@ -111,22 +110,22 @@ static int ceph_set_page_dirty(struct page *page) if (ci->i_wrbuffer_ref == 0) ihold(inode); ++ci->i_wrbuffer_ref; - dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " + dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d " "snapc %p seq %lld (%d snaps)\n", - mapping->host, page, page->index, + mapping->host, folio, folio->index, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, snapc, snapc->seq, snapc->num_snaps); spin_unlock(&ci->i_ceph_lock); /* - * Reference snap context in page->private. Also set + * Reference snap context in folio->private. Also set * PagePrivate so that we get invalidate_folio callback. */ - BUG_ON(PagePrivate(page)); - attach_page_private(page, snapc); + BUG_ON(folio_get_private(folio)); + folio_attach_private(folio, snapc); - return ceph_fscache_set_page_dirty(page); + return ceph_fscache_dirty_folio(mapping, folio); } /* @@ -1376,7 +1375,7 @@ const struct address_space_operations ceph_aops = { .writepages = ceph_writepages_start, .write_begin = ceph_write_begin, .write_end = ceph_write_end, - .set_page_dirty = ceph_set_page_dirty, + .dirty_folio = ceph_dirty_folio, .invalidate_folio = ceph_invalidate_folio, .releasepage = ceph_releasepage, .direct_IO = noop_direct_IO, diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h index 09164389fa66..b90f3016994d 100644 --- a/fs/ceph/cache.h +++ b/fs/ceph/cache.h @@ -54,12 +54,12 @@ static inline void ceph_fscache_unpin_writeback(struct inode *inode, fscache_unpin_writeback(wbc, ceph_fscache_cookie(ceph_inode(inode))); } -static inline int ceph_fscache_set_page_dirty(struct page *page) +static inline int ceph_fscache_dirty_folio(struct address_space *mapping, + struct folio *folio) { - struct inode *inode = page->mapping->host; - struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_inode_info *ci = ceph_inode(mapping->host); - return fscache_set_page_dirty(page, ceph_fscache_cookie(ci)); + return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci)); } static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq) @@ -133,9 +133,10 @@ static inline void ceph_fscache_unpin_writeback(struct inode *inode, { } -static inline int ceph_fscache_set_page_dirty(struct page *page) +static inline int ceph_fscache_dirty_folio(struct address_space *mapping, + struct folio *folio) { - return __set_page_dirty_nobuffers(page); + return filemap_dirty_folio(mapping, folio); } static inline bool ceph_is_cache_enabled(struct inode *inode) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 3fe3c5552b39..8a2e9025bdb3 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4939,12 +4939,13 @@ static void cifs_swap_deactivate(struct file *file) * need to pin the cache object to write back to. 
*/ #ifdef CONFIG_CIFS_FSCACHE -static int cifs_set_page_dirty(struct page *page) +static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio) { - return fscache_set_page_dirty(page, cifs_inode_cookie(page->mapping->host)); + return fscache_dirty_folio(mapping, folio, + cifs_inode_cookie(mapping->host)); } #else -#define cifs_set_page_dirty __set_page_dirty_nobuffers +#define cifs_dirty_folio filemap_dirty_folio #endif const struct address_space_operations cifs_addr_ops = { @@ -4954,7 +4955,7 @@ const struct address_space_operations cifs_addr_ops = { .writepages = cifs_writepages, .write_begin = cifs_write_begin, .write_end = cifs_write_end, - .set_page_dirty = cifs_set_page_dirty, + .dirty_folio = cifs_dirty_folio, .releasepage = cifs_release_page, .direct_IO = cifs_direct_io, .invalidate_folio = cifs_invalidate_folio, @@ -4979,7 +4980,7 @@ const struct address_space_operations cifs_addr_ops_smallbuf = { .writepages = cifs_writepages, .write_begin = cifs_write_begin, .write_end = cifs_write_end, - .set_page_dirty = cifs_set_page_dirty, + .dirty_folio = cifs_dirty_folio, .releasepage = cifs_release_page, .invalidate_folio = cifs_invalidate_folio, .launder_folio = cifs_launder_folio, diff --git a/fs/fscache/io.c b/fs/fscache/io.c index 7a769ea57720..c8c7fe9e9a6e 100644 --- a/fs/fscache/io.c +++ b/fs/fscache/io.c @@ -159,27 +159,29 @@ int __fscache_begin_write_operation(struct netfs_cache_resources *cres, EXPORT_SYMBOL(__fscache_begin_write_operation); /** - * fscache_set_page_dirty - Mark page dirty and pin a cache object for writeback - * @page: The page being dirtied + * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback + * @mapping: The mapping the folio belongs to. + * @folio: The folio being dirtied. * @cookie: The cookie referring to the cache object * - * Set the dirty flag on a page and pin an in-use cache object in memory when - * dirtying a page so that writeback can later write to it. This is intended - * to be called from the filesystem's ->set_page_dirty() method. + * Set the dirty flag on a folio and pin an in-use cache object in memory + * so that writeback can later write to it. This is intended + * to be called from the filesystem's ->dirty_folio() method. * - * Returns 1 if PG_dirty was set on the page, 0 otherwise. + * Return: true if the dirty flag was set on the folio, false otherwise. 
*/ -int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie) +bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio, + struct fscache_cookie *cookie) { - struct inode *inode = page->mapping->host; + struct inode *inode = mapping->host; bool need_use = false; _enter(""); - if (!__set_page_dirty_nobuffers(page)) - return 0; + if (!filemap_dirty_folio(mapping, folio)) + return false; if (!fscache_cookie_valid(cookie)) - return 1; + return true; if (!(inode->i_state & I_PINNING_FSCACHE_WB)) { spin_lock(&inode->i_lock); @@ -192,9 +194,9 @@ int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie) if (need_use) fscache_use_cookie(cookie, true); } - return 1; + return true; } -EXPORT_SYMBOL(fscache_set_page_dirty); +EXPORT_SYMBOL(fscache_dirty_folio); struct fscache_write_request { struct netfs_cache_resources cache_resources; diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 296c5f1d9f35..d44ff747a657 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -616,9 +616,11 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie, } #if __fscache_available -extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie); +bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio, + struct fscache_cookie *cookie); #else -#define fscache_set_page_dirty(PAGE, COOKIE) (__set_page_dirty_nobuffers((PAGE))) +#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \ + filemap_dirty_folio(MAPPING, FOLIO) #endif /** @@ -626,7 +628,7 @@ extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cook * @wbc: The writeback control * @cookie: The cookie referring to the cache object * - * Unpin the writeback resources pinned by fscache_set_page_dirty(). This is + * Unpin the writeback resources pinned by fscache_dirty_folio(). This is * intended to be called by the netfs's ->write_inode() method. */ static inline void fscache_unpin_writeback(struct writeback_control *wbc, -- cgit v1.2.3-71-gd317 From 7e63df00cf5e609ebbee5ffbc3df1900d8a4443c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:22:10 +0000 Subject: mm: Convert swap_set_page_dirty() to swap_dirty_folio() Straightforward conversion. 
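For context, the dispatch added to folio_mark_dirty() earlier in this series is what makes the conversion mechanical; the new hook is preferred, with the old one kept as a fallback:

	if (mapping->a_ops->dirty_folio)
		return mapping->a_ops->dirty_folio(mapping, folio);
	return mapping->a_ops->set_page_dirty(&folio->page);

swap_dirty_folio() below applies the same preference for SWP_FS_OPS swap files.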
Signed-off-by: Matthew Wilcox (Oracle)
Tested-by: Damien Le Moal
Acked-by: Damien Le Moal
Tested-by: Mike Marshall # orangefs
Tested-by: David Howells # afs
---
 include/linux/swap.h | 2 +-
 mm/page_io.c | 18 ++++++++++--------
 mm/swap_state.c | 2 +-
 3 files changed, 12 insertions(+), 10 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1d38d9475c4d..65a37e555124 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -427,7 +427,7 @@ extern int swap_writepage(struct page *page, struct writeback_control *wbc);
 extern void end_swap_bio_write(struct bio *bio);
 extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
 	bio_end_io_t end_write_func);
-extern int swap_set_page_dirty(struct page *page);
+bool swap_dirty_folio(struct address_space *mapping, struct folio *folio);
 int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
 	unsigned long nr_pages, sector_t start_block);
diff --git a/mm/page_io.c b/mm/page_io.c
index 24c975fb4e21..8f20f4dad289 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -438,19 +438,21 @@ out:
 	return ret;
 }

-int swap_set_page_dirty(struct page *page)
+bool swap_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-	struct swap_info_struct *sis = page_swap_info(page);
+	struct swap_info_struct *sis = swp_swap_info(folio_swap_entry(folio));

 	if (data_race(sis->flags & SWP_FS_OPS)) {
-		struct address_space *mapping = sis->swap_file->f_mapping;
-		const struct address_space_operations *aops = mapping->a_ops;
+		const struct address_space_operations *aops;
+
+		mapping = sis->swap_file->f_mapping;
+		aops = mapping->a_ops;

-		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+		VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
 		if (aops->dirty_folio)
-			return aops->dirty_folio(mapping, page_folio(page));
-		return aops->set_page_dirty(page);
+			return aops->dirty_folio(mapping, folio);
+		return aops->set_page_dirty(&folio->page);
 	} else {
-		return __set_page_dirty_no_writeback(page);
+		return __set_page_dirty_no_writeback(&folio->page);
 	}
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8d4104242100..4772afd08101 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -30,7 +30,7 @@
  */
 static const struct address_space_operations swap_aops = {
 	.writepage = swap_writepage,
-	.set_page_dirty = swap_set_page_dirty,
+	.dirty_folio = swap_dirty_folio,
 #ifdef CONFIG_MIGRATION
 	.migratepage = migrate_page,
 #endif
-- cgit v1.2.3-71-gd317


From 938d3480b92fa5e454b7734294f12a7b75126f09 Mon Sep 17 00:00:00 2001
From: Wang Yufen
Date: Fri, 4 Mar 2022 16:11:42 +0800
Subject: bpf, sockmap: Fix memleak in sk_psock_queue_msg

If tcp_bpf_sendmsg is running during a tear down operation, we may enqueue
data on the ingress msg queue while tear down is trying to free it.

 sk1 (redirect sk2)                        sk2
 -------------------                       ---------------
 tcp_bpf_sendmsg()
  tcp_bpf_send_verdict()
   tcp_bpf_sendmsg_redir()
    bpf_tcp_ingress()
                                           sock_map_close()
                                            lock_sock()
     lock_sock() ... blocking
                                            sk_psock_stop
                                             sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
                                            release_sock(sk);
     lock_sock()
      sk_mem_charge()
       get_page()
      sk_psock_queue_msg()
       sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED);
        drop_sk_msg()
     release_sock()

In drop_sk_msg(), the msg has charged memory from sk via sk_mem_charge()
and has sg pages that need to be put. To fix this, use sk_msg_free() and
then kfree() the msg.
This issue can cause the following info: WARNING: CPU: 0 PID: 9202 at net/core/stream.c:205 sk_stream_kill_queues+0xc8/0xe0 Call Trace: inet_csk_destroy_sock+0x55/0x110 tcp_rcv_state_process+0xe5f/0xe90 ? sk_filter_trim_cap+0x10d/0x230 ? tcp_v4_do_rcv+0x161/0x250 tcp_v4_do_rcv+0x161/0x250 tcp_v4_rcv+0xc3a/0xce0 ip_protocol_deliver_rcu+0x3d/0x230 ip_local_deliver_finish+0x54/0x60 ip_local_deliver+0xfd/0x110 ? ip_protocol_deliver_rcu+0x230/0x230 ip_rcv+0xd6/0x100 ? ip_local_deliver+0x110/0x110 __netif_receive_skb_one_core+0x85/0xa0 process_backlog+0xa4/0x160 __napi_poll+0x29/0x1b0 net_rx_action+0x287/0x300 __do_softirq+0xff/0x2fc do_softirq+0x79/0x90 WARNING: CPU: 0 PID: 531 at net/ipv4/af_inet.c:154 inet_sock_destruct+0x175/0x1b0 Call Trace: __sk_destruct+0x24/0x1f0 sk_psock_destroy+0x19b/0x1c0 process_one_work+0x1b3/0x3c0 ? process_one_work+0x3c0/0x3c0 worker_thread+0x30/0x350 ? process_one_work+0x3c0/0x3c0 kthread+0xe6/0x110 ? kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x22/0x30 Fixes: 9635720b7c88 ("bpf, sockmap: Fix memleak on ingress msg enqueue") Signed-off-by: Wang Yufen Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220304081145.2037182-2-wangyufen@huawei.com --- include/linux/skmsg.h | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index fdb5375f0562..c5a2d6f50f25 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -304,21 +304,16 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } -static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg) -{ - if (msg->skb) - sock_drop(psock->sk, msg->skb); - kfree(msg); -} - static inline void sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { spin_lock_bh(&psock->ingress_lock); if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) list_add_tail(&msg->list, &psock->ingress_msg); - else - drop_sk_msg(psock, msg); + else { + sk_msg_free(psock->sk, msg); + kfree(msg); + } spin_unlock_bh(&psock->ingress_lock); } -- cgit v1.2.3-71-gd317 From 23a9dbbe0faf124fc4c139615633b9d12a3a89ef Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Mar 2022 18:34:06 +0300 Subject: NFSD: prevent integer overflow on 32 bit systems On a 32 bit system, the "len * sizeof(*p)" operation can have an integer overflow. Cc: stable@vger.kernel.org Signed-off-by: Dan Carpenter Signed-off-by: Chuck Lever --- include/linux/sunrpc/xdr.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index b519609af1d0..4417f667c757 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -731,6 +731,8 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr, if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) return -EBADMSG; + if (len > SIZE_MAX / sizeof(*p)) + return -EBADMSG; p = xdr_inline_decode(xdr, len * sizeof(*p)); if (unlikely(!p)) return -EBADMSG; -- cgit v1.2.3-71-gd317 From b0ae33a2d2fb6c55117b377ec4ae3f2c84eab6a2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 4 Mar 2022 16:19:55 +0100 Subject: usb: early: xhci-dbc: Remove duplicate keep parsing The generic earlyprintk= parsing already parses the optional ",keep", no need to duplicate that in the xdbc driver. 
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20220304152135.975568860@infradead.org
Signed-off-by: Greg Kroah-Hartman
---
 arch/x86/kernel/early_printk.c | 2 +-
 drivers/usb/early/xhci-dbc.c | 5 ++---
 include/linux/usb/xhci-dbgp.h | 2 +-
 3 files changed, 4 insertions(+), 5 deletions(-)

(limited to 'include/linux')

diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index d3c531d3b244..68b38925a74f 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -387,7 +387,7 @@ static int __init setup_early_printk(char *buf)
 #endif
 #ifdef CONFIG_EARLY_PRINTK_USB_XDBC
 		if (!strncmp(buf, "xdbc", 4))
-			early_xdbc_parse_parameter(buf + 4);
+			early_xdbc_parse_parameter(buf + 4, keep);
 #endif
 		buf++;
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 4502108069cd..100a45d26b5a 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -599,7 +599,7 @@ static int __init xdbc_early_setup(void)
 	return 0;
 }

-int __init early_xdbc_parse_parameter(char *s)
+int __init early_xdbc_parse_parameter(char *s, int keep_early)
 {
 	unsigned long dbgp_num = 0;
 	u32 bus, dev, func, offset;
@@ -608,8 +608,7 @@ int __init early_xdbc_parse_parameter(char *s)
 	if (!early_pci_allowed())
 		return -EPERM;

-	if (strstr(s, "keep"))
-		early_console_keep = true;
+	early_console_keep = keep_early;

 	if (xdbc.xdbc_reg)
 		return 0;
diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h
index 0a37f1283bf0..01fe768873f9 100644
--- a/include/linux/usb/xhci-dbgp.h
+++ b/include/linux/usb/xhci-dbgp.h
@@ -15,7 +15,7 @@
 #define __LINUX_XHCI_DBGP_H

 #ifdef CONFIG_EARLY_PRINTK_USB_XDBC
-int __init early_xdbc_parse_parameter(char *s);
+int __init early_xdbc_parse_parameter(char *s, int keep_early);
 int __init early_xdbc_setup_hardware(void);
 void __init early_xdbc_register_console(void);
 #else
-- cgit v1.2.3-71-gd317


From ff5812e00d5e7bdeae68784d99686eff67896931 Mon Sep 17 00:00:00 2001
From: Shameer Kolothum
Date: Tue, 8 Mar 2022 18:48:54 +0000
Subject: crypto: hisilicon/qm: Move the QM header to include/linux

Since we are going to introduce a VFIO PCI HiSilicon ACC driver for live
migration in subsequent patches, move the ACC QM header file to a common
include directory.
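With the header now at include/linux/hisi_acc_qm.h (see the diffstat below), users switch from the relative include to the standard angle-bracket form; every per-driver hunk in this patch makes the same substitution:

	-#include "../qm.h"
	+#include <linux/hisi_acc_qm.h>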
Acked-by: Zhou Wang Acked-by: Longfang Liu Acked-by: Kai Ye Signed-off-by: Shameer Kolothum Link: https://lore.kernel.org/r/20220308184902.2242-2-shameerali.kolothum.thodi@huawei.com Signed-off-by: Alex Williamson --- drivers/crypto/hisilicon/hpre/hpre.h | 2 +- drivers/crypto/hisilicon/qm.c | 2 +- drivers/crypto/hisilicon/qm.h | 441 ----------------------------------- drivers/crypto/hisilicon/sec2/sec.h | 2 +- drivers/crypto/hisilicon/sgl.c | 2 +- drivers/crypto/hisilicon/zip/zip.h | 2 +- include/linux/hisi_acc_qm.h | 441 +++++++++++++++++++++++++++++++++++ 7 files changed, 446 insertions(+), 446 deletions(-) delete mode 100644 drivers/crypto/hisilicon/qm.h create mode 100644 include/linux/hisi_acc_qm.h (limited to 'include/linux') diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index e0b4a1982ee9..9a0558ed82f9 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -4,7 +4,7 @@ #define __HISI_HPRE_H #include -#include "../qm.h" +#include #define HPRE_SQE_SIZE sizeof(struct hpre_sqe) #define HPRE_PF_DEF_Q_NUM 64 diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c5b84a5ea350..ed23e1d3fa27 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -15,7 +15,7 @@ #include #include #include -#include "qm.h" +#include /* eq/aeq irq enable */ #define QM_VF_AEQ_INT_SOURCE 0x0 diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h deleted file mode 100644 index 3068093229a5..000000000000 --- a/drivers/crypto/hisilicon/qm.h +++ /dev/null @@ -1,441 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2019 HiSilicon Limited. */ -#ifndef HISI_ACC_QM_H -#define HISI_ACC_QM_H - -#include -#include -#include -#include -#include - -#define QM_QNUM_V1 4096 -#define QM_QNUM_V2 1024 -#define QM_MAX_VFS_NUM_V2 63 - -/* qm user domain */ -#define QM_ARUSER_M_CFG_1 0x100088 -#define AXUSER_SNOOP_ENABLE BIT(30) -#define AXUSER_CMD_TYPE GENMASK(14, 12) -#define AXUSER_CMD_SMMU_NORMAL 1 -#define AXUSER_NS BIT(6) -#define AXUSER_NO BIT(5) -#define AXUSER_FP BIT(4) -#define AXUSER_SSV BIT(0) -#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \ - FIELD_PREP(AXUSER_CMD_TYPE, \ - AXUSER_CMD_SMMU_NORMAL) | \ - AXUSER_NS | AXUSER_NO | AXUSER_FP) -#define QM_ARUSER_M_CFG_ENABLE 0x100090 -#define ARUSER_M_CFG_ENABLE 0xfffffffe -#define QM_AWUSER_M_CFG_1 0x100098 -#define QM_AWUSER_M_CFG_ENABLE 0x1000a0 -#define AWUSER_M_CFG_ENABLE 0xfffffffe -#define QM_WUSER_M_CFG_ENABLE 0x1000a8 -#define WUSER_M_CFG_ENABLE 0xffffffff - -/* qm cache */ -#define QM_CACHE_CTL 0x100050 -#define SQC_CACHE_ENABLE BIT(0) -#define CQC_CACHE_ENABLE BIT(1) -#define SQC_CACHE_WB_ENABLE BIT(4) -#define SQC_CACHE_WB_THRD GENMASK(10, 5) -#define CQC_CACHE_WB_ENABLE BIT(11) -#define CQC_CACHE_WB_THRD GENMASK(17, 12) -#define QM_AXI_M_CFG 0x1000ac -#define AXI_M_CFG 0xffff -#define QM_AXI_M_CFG_ENABLE 0x1000b0 -#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014 -#define AXI_M_CFG_ENABLE 0xffffffff -#define QM_PEH_AXUSER_CFG 0x1000cc -#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 -#define PEH_AXUSER_CFG 0x401001 -#define PEH_AXUSER_CFG_ENABLE 0xffffffff - -#define QM_AXI_RRESP BIT(0) -#define QM_AXI_BRESP BIT(1) -#define QM_ECC_MBIT BIT(2) -#define QM_ECC_1BIT BIT(3) -#define QM_ACC_GET_TASK_TIMEOUT BIT(4) -#define QM_ACC_DO_TASK_TIMEOUT BIT(5) -#define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6) -#define QM_SQ_CQ_VF_INVALID BIT(7) -#define QM_CQ_VF_INVALID BIT(8) -#define QM_SQ_VF_INVALID BIT(9) -#define 
QM_DB_TIMEOUT BIT(10) -#define QM_OF_FIFO_OF BIT(11) -#define QM_DB_RANDOM_INVALID BIT(12) -#define QM_MAILBOX_TIMEOUT BIT(13) -#define QM_FLR_TIMEOUT BIT(14) - -#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ - QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ - QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \ - QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT) -#define QM_BASE_CE QM_ECC_1BIT - -#define QM_Q_DEPTH 1024 -#define QM_MIN_QNUM 2 -#define HISI_ACC_SGL_SGE_NR_MAX 255 -#define QM_SHAPER_CFG 0x100164 -#define QM_SHAPER_ENABLE BIT(30) -#define QM_SHAPER_TYPE1_OFFSET 10 - -/* page number for queue file region */ -#define QM_DOORBELL_PAGE_NR 1 - -/* uacce mode of the driver */ -#define UACCE_MODE_NOUACCE 0 /* don't use uacce */ -#define UACCE_MODE_SVA 1 /* use uacce sva mode */ -#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce" - -enum qm_stop_reason { - QM_NORMAL, - QM_SOFT_RESET, - QM_FLR, -}; - -enum qm_state { - QM_INIT = 0, - QM_START, - QM_CLOSE, - QM_STOP, -}; - -enum qp_state { - QP_INIT = 1, - QP_START, - QP_STOP, - QP_CLOSE, -}; - -enum qm_hw_ver { - QM_HW_UNKNOWN = -1, - QM_HW_V1 = 0x20, - QM_HW_V2 = 0x21, - QM_HW_V3 = 0x30, -}; - -enum qm_fun_type { - QM_HW_PF, - QM_HW_VF, -}; - -enum qm_debug_file { - CURRENT_QM, - CURRENT_Q, - CLEAR_ENABLE, - DEBUG_FILE_NUM, -}; - -struct qm_dfx { - atomic64_t err_irq_cnt; - atomic64_t aeq_irq_cnt; - atomic64_t abnormal_irq_cnt; - atomic64_t create_qp_err_cnt; - atomic64_t mb_err_cnt; -}; - -struct debugfs_file { - enum qm_debug_file index; - struct mutex lock; - struct qm_debug *debug; -}; - -struct qm_debug { - u32 curr_qm_qp_num; - u32 sqe_mask_offset; - u32 sqe_mask_len; - struct qm_dfx dfx; - struct dentry *debug_root; - struct dentry *qm_d; - struct debugfs_file files[DEBUG_FILE_NUM]; -}; - -struct qm_shaper_factor { - u32 func_qos; - u64 cir_b; - u64 cir_u; - u64 cir_s; - u64 cbs_s; -}; - -struct qm_dma { - void *va; - dma_addr_t dma; - size_t size; -}; - -struct hisi_qm_status { - u32 eq_head; - bool eqc_phase; - u32 aeq_head; - bool aeqc_phase; - atomic_t flags; - int stop_reason; -}; - -struct hisi_qm; - -struct hisi_qm_err_info { - char *acpi_rst; - u32 msi_wr_port; - u32 ecc_2bits_mask; - u32 dev_ce_mask; - u32 ce; - u32 nfe; - u32 fe; -}; - -struct hisi_qm_err_status { - u32 is_qm_ecc_mbit; - u32 is_dev_ecc_mbit; -}; - -struct hisi_qm_err_ini { - int (*hw_init)(struct hisi_qm *qm); - void (*hw_err_enable)(struct hisi_qm *qm); - void (*hw_err_disable)(struct hisi_qm *qm); - u32 (*get_dev_hw_err_status)(struct hisi_qm *qm); - void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts); - void (*open_axi_master_ooo)(struct hisi_qm *qm); - void (*close_axi_master_ooo)(struct hisi_qm *qm); - void (*open_sva_prefetch)(struct hisi_qm *qm); - void (*close_sva_prefetch)(struct hisi_qm *qm); - void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); - void (*err_info_init)(struct hisi_qm *qm); -}; - -struct hisi_qm_list { - struct mutex lock; - struct list_head list; - int (*register_to_crypto)(struct hisi_qm *qm); - void (*unregister_from_crypto)(struct hisi_qm *qm); -}; - -struct hisi_qm { - enum qm_hw_ver ver; - enum qm_fun_type fun_type; - const char *dev_name; - struct pci_dev *pdev; - void __iomem *io_base; - void __iomem *db_io_base; - u32 sqe_size; - u32 qp_base; - u32 qp_num; - u32 qp_in_used; - u32 ctrl_qp_num; - u32 max_qp_num; - u32 vfs_num; - u32 db_interval; - struct list_head list; - struct hisi_qm_list *qm_list; - - struct qm_dma qdma; - struct qm_sqc *sqc; - 
struct qm_cqc *cqc; - struct qm_eqe *eqe; - struct qm_aeqe *aeqe; - dma_addr_t sqc_dma; - dma_addr_t cqc_dma; - dma_addr_t eqe_dma; - dma_addr_t aeqe_dma; - - struct hisi_qm_status status; - const struct hisi_qm_err_ini *err_ini; - struct hisi_qm_err_info err_info; - struct hisi_qm_err_status err_status; - unsigned long misc_ctl; /* driver removing and reset sched */ - - struct rw_semaphore qps_lock; - struct idr qp_idr; - struct hisi_qp *qp_array; - - struct mutex mailbox_lock; - - const struct hisi_qm_hw_ops *ops; - - struct qm_debug debug; - - u32 error_mask; - - struct workqueue_struct *wq; - struct work_struct work; - struct work_struct rst_work; - struct work_struct cmd_process; - - const char *algs; - bool use_sva; - bool is_frozen; - - /* doorbell isolation enable */ - bool use_db_isolation; - resource_size_t phys_base; - resource_size_t db_phys_base; - struct uacce_device *uacce; - int mode; - struct qm_shaper_factor *factor; - u32 mb_qos; - u32 type_rate; -}; - -struct hisi_qp_status { - atomic_t used; - u16 sq_tail; - u16 cq_head; - bool cqc_phase; - atomic_t flags; -}; - -struct hisi_qp_ops { - int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm); -}; - -struct hisi_qp { - u32 qp_id; - u8 alg_type; - u8 req_type; - - struct qm_dma qdma; - void *sqe; - struct qm_cqe *cqe; - dma_addr_t sqe_dma; - dma_addr_t cqe_dma; - - struct hisi_qp_status qp_status; - struct hisi_qp_ops *hw_ops; - void *qp_ctx; - void (*req_cb)(struct hisi_qp *qp, void *data); - void (*event_cb)(struct hisi_qp *qp); - - struct hisi_qm *qm; - bool is_resetting; - bool is_in_kernel; - u16 pasid; - struct uacce_queue *uacce_q; -}; - -static inline int q_num_set(const char *val, const struct kernel_param *kp, - unsigned int device) -{ - struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, - device, NULL); - u32 n, q_num; - int ret; - - if (!val) - return -EINVAL; - - if (!pdev) { - q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); - pr_info("No device found currently, suppose queue number is %u\n", - q_num); - } else { - if (pdev->revision == QM_HW_V1) - q_num = QM_QNUM_V1; - else - q_num = QM_QNUM_V2; - } - - ret = kstrtou32(val, 10, &n); - if (ret || n < QM_MIN_QNUM || n > q_num) - return -EINVAL; - - return param_set_int(val, kp); -} - -static inline int vfs_num_set(const char *val, const struct kernel_param *kp) -{ - u32 n; - int ret; - - if (!val) - return -EINVAL; - - ret = kstrtou32(val, 10, &n); - if (ret < 0) - return ret; - - if (n > QM_MAX_VFS_NUM_V2) - return -EINVAL; - - return param_set_int(val, kp); -} - -static inline int mode_set(const char *val, const struct kernel_param *kp) -{ - u32 n; - int ret; - - if (!val) - return -EINVAL; - - ret = kstrtou32(val, 10, &n); - if (ret != 0 || (n != UACCE_MODE_SVA && - n != UACCE_MODE_NOUACCE)) - return -EINVAL; - - return param_set_int(val, kp); -} - -static inline int uacce_mode_set(const char *val, const struct kernel_param *kp) -{ - return mode_set(val, kp); -} - -static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list) -{ - INIT_LIST_HEAD(&qm_list->list); - mutex_init(&qm_list->lock); -} - -int hisi_qm_init(struct hisi_qm *qm); -void hisi_qm_uninit(struct hisi_qm *qm); -int hisi_qm_start(struct hisi_qm *qm); -int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r); -struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type); -int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); -int hisi_qm_stop_qp(struct hisi_qp *qp); -void hisi_qm_release_qp(struct hisi_qp *qp); -int hisi_qp_send(struct hisi_qp *qp, const void *msg); 
-int hisi_qm_get_free_qp_num(struct hisi_qm *qm); -int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); -void hisi_qm_debug_init(struct hisi_qm *qm); -enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); -void hisi_qm_debug_regs_clear(struct hisi_qm *qm); -int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs); -int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen); -int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs); -void hisi_qm_dev_err_init(struct hisi_qm *qm); -void hisi_qm_dev_err_uninit(struct hisi_qm *qm); -pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, - pci_channel_state_t state); -pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); -void hisi_qm_reset_prepare(struct pci_dev *pdev); -void hisi_qm_reset_done(struct pci_dev *pdev); - -struct hisi_acc_sgl_pool; -struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, - struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, - u32 index, dma_addr_t *hw_sgl_dma); -void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, - struct hisi_acc_hw_sgl *hw_sgl); -struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, - u32 count, u32 sge_nr); -void hisi_acc_free_sgl_pool(struct device *dev, - struct hisi_acc_sgl_pool *pool); -int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, - u8 alg_type, int node, struct hisi_qp **qps); -void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num); -void hisi_qm_dev_shutdown(struct pci_dev *pdev); -void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -int hisi_qm_resume(struct device *dev); -int hisi_qm_suspend(struct device *dev); -void hisi_qm_pm_uninit(struct hisi_qm *qm); -void hisi_qm_pm_init(struct hisi_qm *qm); -int hisi_qm_get_dfx_access(struct hisi_qm *qm); -void hisi_qm_put_dfx_access(struct hisi_qm *qm); -void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset); -#endif diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index d97cf02b1df7..c2e9b01187a7 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -4,7 +4,7 @@ #ifndef __HISI_SEC_V2_H #define __HISI_SEC_V2_H -#include "../qm.h" +#include #include "sec_crypto.h" /* Algorithm resource per hardware SEC queue */ diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 057273769f26..f7efc02b065f 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include +#include #include #include -#include "qm.h" #define HISI_ACC_SGL_SGE_NR_MIN 1 #define HISI_ACC_SGL_NR_MAX 256 diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 517fdbdff3ea..3dfd3bac5a33 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -7,7 +7,7 @@ #define pr_fmt(fmt) "hisi_zip: " fmt #include -#include "../qm.h" +#include enum hisi_zip_error_type { /* negative compression */ diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h new file mode 100644 index 000000000000..3068093229a5 --- /dev/null +++ b/include/linux/hisi_acc_qm.h @@ -0,0 +1,441 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 HiSilicon Limited. 
*/ +#ifndef HISI_ACC_QM_H +#define HISI_ACC_QM_H + +#include <linux/bitfield.h> +#include <linux/debugfs.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/pci.h> + +#define QM_QNUM_V1 4096 +#define QM_QNUM_V2 1024 +#define QM_MAX_VFS_NUM_V2 63 + +/* qm user domain */ +#define QM_ARUSER_M_CFG_1 0x100088 +#define AXUSER_SNOOP_ENABLE BIT(30) +#define AXUSER_CMD_TYPE GENMASK(14, 12) +#define AXUSER_CMD_SMMU_NORMAL 1 +#define AXUSER_NS BIT(6) +#define AXUSER_NO BIT(5) +#define AXUSER_FP BIT(4) +#define AXUSER_SSV BIT(0) +#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \ + FIELD_PREP(AXUSER_CMD_TYPE, \ + AXUSER_CMD_SMMU_NORMAL) | \ + AXUSER_NS | AXUSER_NO | AXUSER_FP) +#define QM_ARUSER_M_CFG_ENABLE 0x100090 +#define ARUSER_M_CFG_ENABLE 0xfffffffe +#define QM_AWUSER_M_CFG_1 0x100098 +#define QM_AWUSER_M_CFG_ENABLE 0x1000a0 +#define AWUSER_M_CFG_ENABLE 0xfffffffe +#define QM_WUSER_M_CFG_ENABLE 0x1000a8 +#define WUSER_M_CFG_ENABLE 0xffffffff + +/* qm cache */ +#define QM_CACHE_CTL 0x100050 +#define SQC_CACHE_ENABLE BIT(0) +#define CQC_CACHE_ENABLE BIT(1) +#define SQC_CACHE_WB_ENABLE BIT(4) +#define SQC_CACHE_WB_THRD GENMASK(10, 5) +#define CQC_CACHE_WB_ENABLE BIT(11) +#define CQC_CACHE_WB_THRD GENMASK(17, 12) +#define QM_AXI_M_CFG 0x1000ac +#define AXI_M_CFG 0xffff +#define QM_AXI_M_CFG_ENABLE 0x1000b0 +#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014 +#define AXI_M_CFG_ENABLE 0xffffffff +#define QM_PEH_AXUSER_CFG 0x1000cc +#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 +#define PEH_AXUSER_CFG 0x401001 +#define PEH_AXUSER_CFG_ENABLE 0xffffffff + +#define QM_AXI_RRESP BIT(0) +#define QM_AXI_BRESP BIT(1) +#define QM_ECC_MBIT BIT(2) +#define QM_ECC_1BIT BIT(3) +#define QM_ACC_GET_TASK_TIMEOUT BIT(4) +#define QM_ACC_DO_TASK_TIMEOUT BIT(5) +#define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6) +#define QM_SQ_CQ_VF_INVALID BIT(7) +#define QM_CQ_VF_INVALID BIT(8) +#define QM_SQ_VF_INVALID BIT(9) +#define QM_DB_TIMEOUT BIT(10) +#define QM_OF_FIFO_OF BIT(11) +#define QM_DB_RANDOM_INVALID BIT(12) +#define QM_MAILBOX_TIMEOUT BIT(13) +#define QM_FLR_TIMEOUT BIT(14) + +#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ + QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ + QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \ + QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT) +#define QM_BASE_CE QM_ECC_1BIT + +#define QM_Q_DEPTH 1024 +#define QM_MIN_QNUM 2 +#define HISI_ACC_SGL_SGE_NR_MAX 255 +#define QM_SHAPER_CFG 0x100164 +#define QM_SHAPER_ENABLE BIT(30) +#define QM_SHAPER_TYPE1_OFFSET 10 + +/* page number for queue file region */ +#define QM_DOORBELL_PAGE_NR 1 + +/* uacce mode of the driver */ +#define UACCE_MODE_NOUACCE 0 /* don't use uacce */ +#define UACCE_MODE_SVA 1 /* use uacce sva mode */ +#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce" + +enum qm_stop_reason { + QM_NORMAL, + QM_SOFT_RESET, + QM_FLR, +}; + +enum qm_state { + QM_INIT = 0, + QM_START, + QM_CLOSE, + QM_STOP, +}; + +enum qp_state { + QP_INIT = 1, + QP_START, + QP_STOP, + QP_CLOSE, +}; + +enum qm_hw_ver { + QM_HW_UNKNOWN = -1, + QM_HW_V1 = 0x20, + QM_HW_V2 = 0x21, + QM_HW_V3 = 0x30, +}; + +enum qm_fun_type { + QM_HW_PF, + QM_HW_VF, +}; + +enum qm_debug_file { + CURRENT_QM, + CURRENT_Q, + CLEAR_ENABLE, + DEBUG_FILE_NUM, +}; + +struct qm_dfx { + atomic64_t err_irq_cnt; + atomic64_t aeq_irq_cnt; + atomic64_t abnormal_irq_cnt; + atomic64_t create_qp_err_cnt; + atomic64_t mb_err_cnt; +}; + +struct debugfs_file { + enum qm_debug_file index; + struct mutex lock; + struct qm_debug *debug; +}; + +struct qm_debug { + u32 curr_qm_qp_num; + u32 sqe_mask_offset; + u32 sqe_mask_len; +
struct qm_dfx dfx; + struct dentry *debug_root; + struct dentry *qm_d; + struct debugfs_file files[DEBUG_FILE_NUM]; +}; + +struct qm_shaper_factor { + u32 func_qos; + u64 cir_b; + u64 cir_u; + u64 cir_s; + u64 cbs_s; +}; + +struct qm_dma { + void *va; + dma_addr_t dma; + size_t size; +}; + +struct hisi_qm_status { + u32 eq_head; + bool eqc_phase; + u32 aeq_head; + bool aeqc_phase; + atomic_t flags; + int stop_reason; +}; + +struct hisi_qm; + +struct hisi_qm_err_info { + char *acpi_rst; + u32 msi_wr_port; + u32 ecc_2bits_mask; + u32 dev_ce_mask; + u32 ce; + u32 nfe; + u32 fe; +}; + +struct hisi_qm_err_status { + u32 is_qm_ecc_mbit; + u32 is_dev_ecc_mbit; +}; + +struct hisi_qm_err_ini { + int (*hw_init)(struct hisi_qm *qm); + void (*hw_err_enable)(struct hisi_qm *qm); + void (*hw_err_disable)(struct hisi_qm *qm); + u32 (*get_dev_hw_err_status)(struct hisi_qm *qm); + void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts); + void (*open_axi_master_ooo)(struct hisi_qm *qm); + void (*close_axi_master_ooo)(struct hisi_qm *qm); + void (*open_sva_prefetch)(struct hisi_qm *qm); + void (*close_sva_prefetch)(struct hisi_qm *qm); + void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); + void (*err_info_init)(struct hisi_qm *qm); +}; + +struct hisi_qm_list { + struct mutex lock; + struct list_head list; + int (*register_to_crypto)(struct hisi_qm *qm); + void (*unregister_from_crypto)(struct hisi_qm *qm); +}; + +struct hisi_qm { + enum qm_hw_ver ver; + enum qm_fun_type fun_type; + const char *dev_name; + struct pci_dev *pdev; + void __iomem *io_base; + void __iomem *db_io_base; + u32 sqe_size; + u32 qp_base; + u32 qp_num; + u32 qp_in_used; + u32 ctrl_qp_num; + u32 max_qp_num; + u32 vfs_num; + u32 db_interval; + struct list_head list; + struct hisi_qm_list *qm_list; + + struct qm_dma qdma; + struct qm_sqc *sqc; + struct qm_cqc *cqc; + struct qm_eqe *eqe; + struct qm_aeqe *aeqe; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; + dma_addr_t eqe_dma; + dma_addr_t aeqe_dma; + + struct hisi_qm_status status; + const struct hisi_qm_err_ini *err_ini; + struct hisi_qm_err_info err_info; + struct hisi_qm_err_status err_status; + unsigned long misc_ctl; /* driver removing and reset sched */ + + struct rw_semaphore qps_lock; + struct idr qp_idr; + struct hisi_qp *qp_array; + + struct mutex mailbox_lock; + + const struct hisi_qm_hw_ops *ops; + + struct qm_debug debug; + + u32 error_mask; + + struct workqueue_struct *wq; + struct work_struct work; + struct work_struct rst_work; + struct work_struct cmd_process; + + const char *algs; + bool use_sva; + bool is_frozen; + + /* doorbell isolation enable */ + bool use_db_isolation; + resource_size_t phys_base; + resource_size_t db_phys_base; + struct uacce_device *uacce; + int mode; + struct qm_shaper_factor *factor; + u32 mb_qos; + u32 type_rate; +}; + +struct hisi_qp_status { + atomic_t used; + u16 sq_tail; + u16 cq_head; + bool cqc_phase; + atomic_t flags; +}; + +struct hisi_qp_ops { + int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm); +}; + +struct hisi_qp { + u32 qp_id; + u8 alg_type; + u8 req_type; + + struct qm_dma qdma; + void *sqe; + struct qm_cqe *cqe; + dma_addr_t sqe_dma; + dma_addr_t cqe_dma; + + struct hisi_qp_status qp_status; + struct hisi_qp_ops *hw_ops; + void *qp_ctx; + void (*req_cb)(struct hisi_qp *qp, void *data); + void (*event_cb)(struct hisi_qp *qp); + + struct hisi_qm *qm; + bool is_resetting; + bool is_in_kernel; + u16 pasid; + struct uacce_queue *uacce_q; +}; + +static inline int q_num_set(const char *val, const struct kernel_param 
*kp, + unsigned int device) +{ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, + device, NULL); + u32 n, q_num; + int ret; + + if (!val) + return -EINVAL; + + if (!pdev) { + q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); + pr_info("No device found currently, suppose queue number is %u\n", + q_num); + } else { + if (pdev->revision == QM_HW_V1) + q_num = QM_QNUM_V1; + else + q_num = QM_QNUM_V2; + } + + ret = kstrtou32(val, 10, &n); + if (ret || n < QM_MIN_QNUM || n > q_num) + return -EINVAL; + + return param_set_int(val, kp); +} + +static inline int vfs_num_set(const char *val, const struct kernel_param *kp) +{ + u32 n; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret < 0) + return ret; + + if (n > QM_MAX_VFS_NUM_V2) + return -EINVAL; + + return param_set_int(val, kp); +} + +static inline int mode_set(const char *val, const struct kernel_param *kp) +{ + u32 n; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret != 0 || (n != UACCE_MODE_SVA && + n != UACCE_MODE_NOUACCE)) + return -EINVAL; + + return param_set_int(val, kp); +} + +static inline int uacce_mode_set(const char *val, const struct kernel_param *kp) +{ + return mode_set(val, kp); +} + +static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list) +{ + INIT_LIST_HEAD(&qm_list->list); + mutex_init(&qm_list->lock); +} + +int hisi_qm_init(struct hisi_qm *qm); +void hisi_qm_uninit(struct hisi_qm *qm); +int hisi_qm_start(struct hisi_qm *qm); +int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r); +struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type); +int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); +int hisi_qm_stop_qp(struct hisi_qp *qp); +void hisi_qm_release_qp(struct hisi_qp *qp); +int hisi_qp_send(struct hisi_qp *qp, const void *msg); +int hisi_qm_get_free_qp_num(struct hisi_qm *qm); +int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); +void hisi_qm_debug_init(struct hisi_qm *qm); +enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); +void hisi_qm_debug_regs_clear(struct hisi_qm *qm); +int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs); +int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen); +int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs); +void hisi_qm_dev_err_init(struct hisi_qm *qm); +void hisi_qm_dev_err_uninit(struct hisi_qm *qm); +pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, + pci_channel_state_t state); +pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); +void hisi_qm_reset_prepare(struct pci_dev *pdev); +void hisi_qm_reset_done(struct pci_dev *pdev); + +struct hisi_acc_sgl_pool; +struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, + struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, + u32 index, dma_addr_t *hw_sgl_dma); +void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, + struct hisi_acc_hw_sgl *hw_sgl); +struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, + u32 count, u32 sge_nr); +void hisi_acc_free_sgl_pool(struct device *dev, + struct hisi_acc_sgl_pool *pool); +int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, + u8 alg_type, int node, struct hisi_qp **qps); +void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num); +void hisi_qm_dev_shutdown(struct pci_dev *pdev); +void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list); +int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list); +void 
hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list); +int hisi_qm_resume(struct device *dev); +int hisi_qm_suspend(struct device *dev); +void hisi_qm_pm_uninit(struct hisi_qm *qm); +void hisi_qm_pm_init(struct hisi_qm *qm); +int hisi_qm_get_dfx_access(struct hisi_qm *qm); +void hisi_qm_put_dfx_access(struct hisi_qm *qm); +void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset); +#endif -- cgit v1.2.3-71-gd317 From b4b084d7133284e29777ca445e001186259602d4 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Tue, 8 Mar 2022 18:48:55 +0000 Subject: crypto: hisilicon/qm: Move few definitions to common header Move Doorbell and Mailbox definitions to common header file. Also export QM mailbox functions. This will be useful when we introduce VFIO PCI HiSilicon ACC live migration driver. Signed-off-by: Longfang Liu Acked-by: Zhou Wang Signed-off-by: Shameer Kolothum Link: https://lore.kernel.org/r/20220308184902.2242-3-shameerali.kolothum.thodi@huawei.com Signed-off-by: Alex Williamson --- drivers/crypto/hisilicon/qm.c | 58 ++++++++++++++----------------------------- include/linux/hisi_acc_qm.h | 38 ++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index ed23e1d3fa27..c88e013371af 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -33,23 +33,6 @@ #define QM_ABNORMAL_EVENT_IRQ_VECTOR 3 /* mailbox */ -#define QM_MB_CMD_SQC 0x0 -#define QM_MB_CMD_CQC 0x1 -#define QM_MB_CMD_EQC 0x2 -#define QM_MB_CMD_AEQC 0x3 -#define QM_MB_CMD_SQC_BT 0x4 -#define QM_MB_CMD_CQC_BT 0x5 -#define QM_MB_CMD_SQC_VFT_V2 0x6 -#define QM_MB_CMD_STOP_QP 0x8 -#define QM_MB_CMD_SRC 0xc -#define QM_MB_CMD_DST 0xd - -#define QM_MB_CMD_SEND_BASE 0x300 -#define QM_MB_EVENT_SHIFT 8 -#define QM_MB_BUSY_SHIFT 13 -#define QM_MB_OP_SHIFT 14 -#define QM_MB_CMD_DATA_ADDR_L 0x304 -#define QM_MB_CMD_DATA_ADDR_H 0x308 #define QM_MB_PING_ALL_VFS 0xffff #define QM_MB_CMD_DATA_SHIFT 32 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0) @@ -103,19 +86,12 @@ #define QM_DB_CMD_SHIFT_V1 16 #define QM_DB_INDEX_SHIFT_V1 32 #define QM_DB_PRIORITY_SHIFT_V1 48 -#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 -#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 #define QM_QUE_ISO_CFG_V 0x0030 #define QM_PAGE_SIZE 0x0034 #define QM_QUE_ISO_EN 0x100154 #define QM_CAPBILITY 0x100158 #define QM_QP_NUN_MASK GENMASK(10, 0) #define QM_QP_DB_INTERVAL 0x10000 -#define QM_QP_MAX_NUM_SHIFT 11 -#define QM_DB_CMD_SHIFT_V2 12 -#define QM_DB_RAND_SHIFT_V2 16 -#define QM_DB_INDEX_SHIFT_V2 32 -#define QM_DB_PRIORITY_SHIFT_V2 48 #define QM_MEM_START_INIT 0x100040 #define QM_MEM_INIT_DONE 0x100044 @@ -693,7 +669,7 @@ static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, } /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ -static int qm_wait_mb_ready(struct hisi_qm *qm) +int hisi_qm_wait_mb_ready(struct hisi_qm *qm) { u32 val; @@ -701,6 +677,7 @@ static int qm_wait_mb_ready(struct hisi_qm *qm) val, !((val >> QM_MB_BUSY_SHIFT) & 0x1), POLL_PERIOD, POLL_TIMEOUT); } +EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready); /* 128 bit should be written to hardware at one time to trigger a mailbox */ static void qm_mb_write(struct hisi_qm *qm, const void *src) @@ -726,14 +703,14 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) { - if (unlikely(qm_wait_mb_ready(qm))) { + if (unlikely(hisi_qm_wait_mb_ready(qm))) { 
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); goto mb_busy; } qm_mb_write(qm, mailbox); - if (unlikely(qm_wait_mb_ready(qm))) { + if (unlikely(hisi_qm_wait_mb_ready(qm))) { dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); goto mb_busy; } @@ -745,8 +722,8 @@ mb_busy: return -EBUSY; } -static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, - bool op) +int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, + bool op) { struct qm_mailbox mailbox; int ret; @@ -762,6 +739,7 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, return ret; } +EXPORT_SYMBOL_GPL(hisi_qm_mb); static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) { @@ -1351,7 +1329,7 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) u64 sqc_vft; int ret; - ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); + ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); if (ret) return ret; @@ -1725,12 +1703,12 @@ static int dump_show(struct hisi_qm *qm, void *info, static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) { - return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); + return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); } static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) { - return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); + return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); } static int qm_sqc_dump(struct hisi_qm *qm, const char *s) @@ -1842,7 +1820,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, if (IS_ERR(xeqc)) return PTR_ERR(xeqc); - ret = qm_mb(qm, cmd, xeqc_dma, 0, 1); + ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1); if (ret) goto err_free_ctx; @@ -2495,7 +2473,7 @@ unlock: static int qm_stop_qp(struct hisi_qp *qp) { - return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); + return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); } static int qm_set_msi(struct hisi_qm *qm, bool set) @@ -2763,7 +2741,7 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) return -ENOMEM; } - ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE); kfree(sqc); @@ -2804,7 +2782,7 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) return -ENOMEM; } - ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE); kfree(cqc); @@ -3655,7 +3633,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm) return -ENOMEM; } - ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); kfree(eqc); @@ -3684,7 +3662,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm) return -ENOMEM; } - ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); kfree(aeqc); @@ -3723,11 +3701,11 @@ static int __hisi_qm_start(struct hisi_qm *qm) if (ret) return ret; - ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); if (ret) return ret; - ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); if (ret) return ret; diff --git 
a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 3068093229a5..6a6477c34666 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -34,6 +34,40 @@ #define QM_WUSER_M_CFG_ENABLE 0x1000a8 #define WUSER_M_CFG_ENABLE 0xffffffff +/* mailbox */ +#define QM_MB_CMD_SQC 0x0 +#define QM_MB_CMD_CQC 0x1 +#define QM_MB_CMD_EQC 0x2 +#define QM_MB_CMD_AEQC 0x3 +#define QM_MB_CMD_SQC_BT 0x4 +#define QM_MB_CMD_CQC_BT 0x5 +#define QM_MB_CMD_SQC_VFT_V2 0x6 +#define QM_MB_CMD_STOP_QP 0x8 +#define QM_MB_CMD_SRC 0xc +#define QM_MB_CMD_DST 0xd + +#define QM_MB_CMD_SEND_BASE 0x300 +#define QM_MB_EVENT_SHIFT 8 +#define QM_MB_BUSY_SHIFT 13 +#define QM_MB_OP_SHIFT 14 +#define QM_MB_CMD_DATA_ADDR_L 0x304 +#define QM_MB_CMD_DATA_ADDR_H 0x308 +#define QM_MB_MAX_WAIT_CNT 6000 + +/* doorbell */ +#define QM_DOORBELL_CMD_SQ 0 +#define QM_DOORBELL_CMD_CQ 1 +#define QM_DOORBELL_CMD_EQ 2 +#define QM_DOORBELL_CMD_AEQ 3 + +#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 +#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 +#define QM_QP_MAX_NUM_SHIFT 11 +#define QM_DB_CMD_SHIFT_V2 12 +#define QM_DB_RAND_SHIFT_V2 16 +#define QM_DB_INDEX_SHIFT_V2 32 +#define QM_DB_PRIORITY_SHIFT_V2 48 + /* qm cache */ #define QM_CACHE_CTL 0x100050 #define SQC_CACHE_ENABLE BIT(0) @@ -414,6 +448,10 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); void hisi_qm_reset_prepare(struct pci_dev *pdev); void hisi_qm_reset_done(struct pci_dev *pdev); +int hisi_qm_wait_mb_ready(struct hisi_qm *qm); +int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, + bool op); + struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, -- cgit v1.2.3-71-gd317 From fae74feacd2dfb6172c4862fc760f969a991c06f Mon Sep 17 00:00:00 2001 From: Shameer Kolothum Date: Tue, 8 Mar 2022 18:48:56 +0000 Subject: hisi_acc_qm: Move VF PCI device IDs to common header Move the PCI Device IDs of HiSilicon ACC VF devices to a common header and also use a uniform naming convention. This will be useful when we introduce the vfio PCI HiSilicon ACC live migration driver in subsequent patches. 
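As an illustration of the intended reuse (a sketch only, not part of this patch; the table name hisi_acc_vf_ids is made up), a consumer such as the upcoming vfio driver could match the VF devices from the shared header alone:

  #include <linux/module.h>
  #include <linux/pci.h>

  /* Sketch: match all three ACC VF devices via the now-shared IDs. */
  static const struct pci_device_id hisi_acc_vf_ids[] = {
  	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
  	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
  	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
  	{ }
  };
  MODULE_DEVICE_TABLE(pci, hisi_acc_vf_ids);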
Cc: Bjorn Helgaas Acked-by: Zhou Wang Acked-by: Longfang Liu Acked-by: Kai Ye Signed-off-by: Shameer Kolothum Acked-by: Bjorn Helgaas # pci_ids.h Link: https://lore.kernel.org/r/20220308184902.2242-4-shameerali.kolothum.thodi@huawei.com Signed-off-by: Alex Williamson --- drivers/crypto/hisilicon/hpre/hpre_main.c | 13 ++++++------- drivers/crypto/hisilicon/sec2/sec_main.c | 15 +++++++-------- drivers/crypto/hisilicon/zip/zip_main.c | 11 +++++------ include/linux/pci_ids.h | 3 +++ 4 files changed, 21 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index ebfab3e14499..3589d8879b5e 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -68,8 +68,7 @@ #define HPRE_REG_RD_INTVRL_US 10 #define HPRE_REG_RD_TMOUT_US 1000 #define HPRE_DBGFS_VAL_MAX_LEN 20 -#define HPRE_PCI_DEVICE_ID 0xa258 -#define HPRE_PCI_VF_DEVICE_ID 0xa259 +#define PCI_DEVICE_ID_HUAWEI_HPRE_PF 0xa258 #define HPRE_QM_USR_CFG_MASK GENMASK(31, 1) #define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0) #define HPRE_QM_VFG_AX_MASK GENMASK(7, 0) @@ -111,8 +110,8 @@ static const char hpre_name[] = "hisi_hpre"; static struct dentry *hpre_debugfs_root; static const struct pci_device_id hpre_dev_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) }, - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) }, { 0, } }; @@ -242,7 +241,7 @@ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); static int pf_q_num_set(const char *val, const struct kernel_param *kp) { - return q_num_set(val, kp, HPRE_PCI_DEVICE_ID); + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF); } static const struct kernel_param_ops hpre_pf_q_num_ops = { @@ -921,7 +920,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm) qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; hisi_qm_debug_init(qm); - if (qm->pdev->device == HPRE_PCI_DEVICE_ID) { + if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) { ret = hpre_ctrl_debug_init(qm); if (ret) goto failed_to_create; @@ -958,7 +957,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->sqe_size = HPRE_SQE_SIZE; qm->dev_name = hpre_name; - qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? + qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ? 
QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { qm->qp_base = HPRE_PF_DEF_Q_BASE; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 26d3ab1d308b..311a8747b5bf 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -20,8 +20,7 @@ #define SEC_VF_NUM 63 #define SEC_QUEUE_NUM_V1 4096 -#define SEC_PF_PCI_DEVICE_ID 0xa255 -#define SEC_VF_PCI_DEVICE_ID 0xa256 +#define PCI_DEVICE_ID_HUAWEI_SEC_PF 0xa255 #define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF #define SEC_BD_ERR_CHK_EN1 0x7ffff7fd @@ -225,7 +224,7 @@ static const struct debugfs_reg32 sec_dfx_regs[] = { static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) { - return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID); + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF); } static const struct kernel_param_ops sec_pf_q_num_ops = { @@ -313,8 +312,8 @@ module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); static const struct pci_device_id sec_dev_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) }, - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) }, { 0, } }; MODULE_DEVICE_TABLE(pci, sec_dev_ids); @@ -717,7 +716,7 @@ static int sec_core_debug_init(struct hisi_qm *qm) regset->base = qm->io_base; regset->dev = dev; - if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) + if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops); for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) { @@ -735,7 +734,7 @@ static int sec_debug_init(struct hisi_qm *qm) struct sec_dev *sec = container_of(qm, struct sec_dev, qm); int i; - if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { + if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) { for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) { spin_lock_init(&sec->debug.files[i].lock); sec->debug.files[i].index = i; @@ -877,7 +876,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->sqe_size = SEC_SQE_SIZE; qm->dev_name = sec_name; - qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ? + qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ? 
QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { qm->qp_base = SEC_PF_DEF_Q_BASE; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 678f8b58ec42..66decfe07282 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -15,8 +15,7 @@ #include <linux/uacce.h> #include "zip.h" -#define PCI_DEVICE_ID_ZIP_PF 0xa250 -#define PCI_DEVICE_ID_ZIP_VF 0xa251 +#define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250 #define HZIP_QUEUE_NUM_V1 4096 @@ -246,7 +245,7 @@ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); static int pf_q_num_set(const char *val, const struct kernel_param *kp) { - return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF); + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF); } static const struct kernel_param_ops pf_q_num_ops = { @@ -268,8 +267,8 @@ module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); static const struct pci_device_id hisi_zip_dev_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) }, - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_PF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) }, { 0, } }; MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); @@ -838,7 +837,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; - qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? + qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_ZIP_PF) ? QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { qm->qp_base = HZIP_PF_DEF_Q_BASE; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index aad54c666407..31dee2b65a62 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2529,6 +2529,9 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff #define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define PCI_DEVICE_ID_HUAWEI_ZIP_VF 0xa251 +#define PCI_DEVICE_ID_HUAWEI_SEC_VF 0xa256 +#define PCI_DEVICE_ID_HUAWEI_HPRE_VF 0xa259 #define PCI_VENDOR_ID_NETRONOME 0x19ee #define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 -- cgit v1.2.3-71-gd317 From 442fbc099b839551f8576723da22c1269cc695ce Mon Sep 17 00:00:00 2001 From: Shameer Kolothum Date: Tue, 8 Mar 2022 18:48:59 +0000 Subject: hisi_acc_vfio_pci: Add helper to retrieve the struct pci_driver A struct pci_driver pointer is an input to pci_iov_get_pf_drvdata(). Introduce helpers to retrieve the ACC PF drivers' struct pci_driver pointers, as we use these in the ACC vfio migration driver.
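A minimal sketch of the intended use, assuming the VF driver shape from later patches in this series (the helper name example_get_pf_qm is hypothetical):

  #include <linux/err.h>
  #include <linux/hisi_acc_qm.h>
  #include <linux/pci.h>

  /*
   * Sketch: reach the PF's hisi_qm from a SEC VF. The PF driver stores
   * its hisi_qm in drvdata; passing the matching struct pci_driver lets
   * pci_iov_get_pf_drvdata() validate the PF before handing it back.
   */
  static struct hisi_qm *example_get_pf_qm(struct pci_dev *vf_dev)
  {
  	struct hisi_qm *pf_qm;

  	pf_qm = pci_iov_get_pf_drvdata(vf_dev, hisi_sec_get_pf_driver());
  	return IS_ERR(pf_qm) ? NULL : pf_qm;
  }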
Acked-by: Zhou Wang Acked-by: Kai Ye Acked-by: Longfang Liu Signed-off-by: Shameer Kolothum Link: https://lore.kernel.org/r/20220308184902.2242-7-shameerali.kolothum.thodi@huawei.com Signed-off-by: Alex Williamson --- drivers/crypto/hisilicon/hpre/hpre_main.c | 6 ++++++ drivers/crypto/hisilicon/sec2/sec_main.c | 6 ++++++ drivers/crypto/hisilicon/zip/zip_main.c | 6 ++++++ include/linux/hisi_acc_qm.h | 5 +++++ 4 files changed, 23 insertions(+) (limited to 'include/linux') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 3589d8879b5e..36ab30e9e654 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1190,6 +1190,12 @@ static struct pci_driver hpre_pci_driver = { .driver.pm = &hpre_pm_ops, }; +struct pci_driver *hisi_hpre_get_pf_driver(void) +{ + return &hpre_pci_driver; +} +EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver); + static void hpre_register_debugfs(void) { if (!debugfs_initialized()) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 311a8747b5bf..421a405ca337 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1088,6 +1088,12 @@ static struct pci_driver sec_pci_driver = { .driver.pm = &sec_pm_ops, }; +struct pci_driver *hisi_sec_get_pf_driver(void) +{ + return &sec_pci_driver; +} +EXPORT_SYMBOL_GPL(hisi_sec_get_pf_driver); + static void sec_register_debugfs(void) { if (!debugfs_initialized()) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 66decfe07282..4534e1e107d1 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1012,6 +1012,12 @@ static struct pci_driver hisi_zip_pci_driver = { .driver.pm = &hisi_zip_pm_ops, }; +struct pci_driver *hisi_zip_get_pf_driver(void) +{ + return &hisi_zip_pci_driver; +} +EXPORT_SYMBOL_GPL(hisi_zip_get_pf_driver); + static void hisi_zip_register_debugfs(void) { if (!debugfs_initialized()) diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 6a6477c34666..00f2a4db8723 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -476,4 +476,9 @@ void hisi_qm_pm_init(struct hisi_qm *qm); int hisi_qm_get_dfx_access(struct hisi_qm *qm); void hisi_qm_put_dfx_access(struct hisi_qm *qm); void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset); + +/* Used by VFIO ACC live migration driver */ +struct pci_driver *hisi_sec_get_pf_driver(void); +struct pci_driver *hisi_hpre_get_pf_driver(void); +struct pci_driver *hisi_zip_get_pf_driver(void); #endif -- cgit v1.2.3-71-gd317 From 1e459b25081d4f939b8a1fb4c71dab0cec8f974a Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Tue, 8 Mar 2022 18:49:00 +0000 Subject: crypto: hisilicon/qm: Set the VF QM state register We use VF QM state register to record the status of the QM configuration state. This will be used in the ACC migration driver to determine whether we can safely save and restore the QM data. 
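For illustration only (not part of this patch; the helper name is made up), a migration driver could gate its save path on the VF having finished configuration, using QM_VF_STATE and enum qm_vf_state from below:

  #include <linux/hisi_acc_qm.h>
  #include <linux/io.h>

  /* Sketch: only save VF state once the VF QM has reported QM_READY. */
  static bool example_vf_qm_is_ready(struct hisi_qm *vf_qm)
  {
  	return readl(vf_qm->io_base + QM_VF_STATE) == QM_READY;
  }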
Signed-off-by: Longfang Liu Acked-by: Zhou Wang Signed-off-by: Shameer Kolothum Link: https://lore.kernel.org/r/20220308184902.2242-8-shameerali.kolothum.thodi@huawei.com Signed-off-by: Alex Williamson --- drivers/crypto/hisilicon/qm.c | 8 ++++++++ include/linux/hisi_acc_qm.h | 6 ++++++ 2 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c88e013371af..6a8776db38b5 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3492,6 +3492,12 @@ static void hisi_qm_pci_uninit(struct hisi_qm *qm) pci_disable_device(pdev); } +static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) +{ + if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) + writel(state, qm->io_base + QM_VF_STATE); +} + /** * hisi_qm_uninit() - Uninitialize qm. * @qm: The qm needed uninit. @@ -3520,6 +3526,7 @@ void hisi_qm_uninit(struct hisi_qm *qm) dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); } + hisi_qm_set_state(qm, QM_NOT_READY); up_write(&qm->qps_lock); qm_irq_unregister(qm); @@ -3745,6 +3752,7 @@ int hisi_qm_start(struct hisi_qm *qm) if (!ret) atomic_set(&qm->status.flags, QM_START); + hisi_qm_set_state(qm, QM_READY); err_unlock: up_write(&qm->qps_lock); return ret; diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 00f2a4db8723..177f7b7cd414 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -67,6 +67,7 @@ #define QM_DB_RAND_SHIFT_V2 16 #define QM_DB_INDEX_SHIFT_V2 32 #define QM_DB_PRIORITY_SHIFT_V2 48 +#define QM_VF_STATE 0x60 /* qm cache */ #define QM_CACHE_CTL 0x100050 @@ -162,6 +163,11 @@ enum qm_debug_file { DEBUG_FILE_NUM, }; +enum qm_vf_state { + QM_READY = 0, + QM_NOT_READY, +}; + struct qm_dfx { atomic64_t err_irq_cnt; atomic64_t aeq_irq_cnt; -- cgit v1.2.3-71-gd317 From 8126b1c73108bc691f5643df19071a59a69d0bc6 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Mon, 14 Mar 2022 19:59:53 +0100 Subject: pstore: Don't use semaphores in always-atomic-context code pstore_dump() is *always* invoked in atomic context (nowadays in an RCU read-side critical section, before that under a spinlock). It doesn't make sense to try to use semaphores here. This is mostly a revert of commit ea84b580b955 ("pstore: Convert buf_lock to semaphore"), except that two parts aren't restored back exactly as they were: - keep the lock initialization in pstore_register - in efi_pstore_write(), always set the "block" flag to false - omit "is_locked", that was unnecessary since commit 959217c84c27 ("pstore: Actually give up during locking failure") - fix the bailout message The actual problem that the buggy commit was trying to address may have been that the use of preemptible() in efi_pstore_write() was wrong - it only looks at preempt_count() and the state of IRQs, but __rcu_read_lock() doesn't touch either of those under CONFIG_PREEMPT_RCU. (Sidenote: CONFIG_PREEMPT_RCU means that the scheduler can preempt tasks in RCU read-side critical sections, but you're not allowed to actively block/reschedule.) Lockdep probably never caught the problem because it's very rare that you actually hit the contended case, so lockdep always just sees the down_trylock(), not the down_interruptible(), and so it can't tell that there's a problem. 
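To make the sidenote concrete, here is an illustration (a sketch, not part of this patch) of why preemptible() cannot detect an RCU read-side critical section under CONFIG_PREEMPT_RCU:

  #include <linux/preempt.h>
  #include <linux/printk.h>
  #include <linux/rcupdate.h>

  /*
   * Sketch: with CONFIG_PREEMPT_RCU=y, __rcu_read_lock() only bumps
   * current->rcu_read_lock_nesting and leaves preempt_count() alone,
   * so preemptible() can report true even though blocking (e.g. via
   * down_interruptible()) is forbidden here.
   */
  static void example_preemptible_is_misleading(void)
  {
  	rcu_read_lock();
  	pr_info("preemptible() = %d\n", preemptible());	/* may print 1 */
  	rcu_read_unlock();
  }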
Fixes: ea84b580b955 ("pstore: Convert buf_lock to semaphore") Cc: stable@vger.kernel.org Acked-by: Sebastian Andrzej Siewior Signed-off-by: Jann Horn Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220314185953.2068993-1-jannh@google.com --- drivers/firmware/efi/efi-pstore.c | 2 +- fs/pstore/platform.c | 38 ++++++++++++++++++-------------------- include/linux/pstore.h | 6 +++--- 3 files changed, 22 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 0ef086e43090..7e771c56c13c 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -266,7 +266,7 @@ static int efi_pstore_write(struct pstore_record *record) efi_name[i] = name[i]; ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES, - preemptible(), record->size, record->psi->buf); + false, record->size, record->psi->buf); if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE)) if (!schedule_work(&efivar_work)) diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index f243cb5e6a4f..e26162f102ff 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -143,21 +143,22 @@ static void pstore_timer_kick(void) mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms)); } -/* - * Should pstore_dump() wait for a concurrent pstore_dump()? If - * not, the current pstore_dump() will report a failure to dump - * and return. - */ -static bool pstore_cannot_wait(enum kmsg_dump_reason reason) +static bool pstore_cannot_block_path(enum kmsg_dump_reason reason) { - /* In NMI path, pstore shouldn't block regardless of reason. */ + /* + * In case of NMI path, pstore shouldn't be blocked + * regardless of reason. + */ if (in_nmi()) return true; switch (reason) { /* In panic case, other cpus are stopped by smp_send_stop(). */ case KMSG_DUMP_PANIC: - /* Emergency restart shouldn't be blocked. */ + /* + * Emergency restart shouldn't be blocked by spinning on + * pstore_info::buf_lock. + */ case KMSG_DUMP_EMERG: return true; default: @@ -389,21 +390,19 @@ static void pstore_dump(struct kmsg_dumper *dumper, unsigned long total = 0; const char *why; unsigned int part = 1; + unsigned long flags = 0; int ret; why = kmsg_dump_reason_str(reason); - if (down_trylock(&psinfo->buf_lock)) { - /* Failed to acquire lock: give up if we cannot wait. */ - if (pstore_cannot_wait(reason)) { - pr_err("dump skipped in %s path: may corrupt error record\n", - in_nmi() ? "NMI" : why); - return; - } - if (down_interruptible(&psinfo->buf_lock)) { - pr_err("could not grab semaphore?!\n"); + if (pstore_cannot_block_path(reason)) { + if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) { + pr_err("dump skipped in %s path because of concurrent dump\n", + in_nmi() ? 
"NMI" : why); return; } + } else { + spin_lock_irqsave(&psinfo->buf_lock, flags); } kmsg_dump_rewind(&iter); @@ -467,8 +466,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, total += record.size; part++; } - - up(&psinfo->buf_lock); + spin_unlock_irqrestore(&psinfo->buf_lock, flags); } static struct kmsg_dumper pstore_dumper = { @@ -594,7 +592,7 @@ int pstore_register(struct pstore_info *psi) psi->write_user = pstore_write_user_compat; psinfo = psi; mutex_init(&psinfo->read_mutex); - sema_init(&psinfo->buf_lock, 1); + spin_lock_init(&psinfo->buf_lock); if (psi->flags & PSTORE_FLAGS_DMESG) allocate_buf_for_compression(); diff --git a/include/linux/pstore.h b/include/linux/pstore.h index eb93a54cff31..e97a8188f0fd 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include @@ -87,7 +87,7 @@ struct pstore_record { * @owner: module which is responsible for this backend driver * @name: name of the backend driver * - * @buf_lock: semaphore to serialize access to @buf + * @buf_lock: spinlock to serialize access to @buf * @buf: preallocated crash dump buffer * @bufsize: size of @buf available for crash dump bytes (must match * smallest number of bytes available for writing to a @@ -178,7 +178,7 @@ struct pstore_info { struct module *owner; const char *name; - struct semaphore buf_lock; + spinlock_t buf_lock; char *buf; size_t bufsize; -- cgit v1.2.3-71-gd317 From e621900ad28b748e058b81d6078a5d5eb37b3973 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:22:12 +0000 Subject: fs: Convert __set_page_dirty_buffers to block_dirty_folio Convert all callers; mostly this is just changing the aops to point at it, but a few implementations need a little more work. 
Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- block/fops.c | 2 +- fs/adfs/inode.c | 2 +- fs/affs/file.c | 4 ++-- fs/bfs/file.c | 2 +- fs/buffer.c | 33 +++++++++++++++------------------ fs/ecryptfs/mmap.c | 2 +- fs/exfat/inode.c | 2 +- fs/ext2/inode.c | 8 ++++---- fs/ext4/inode.c | 12 ++++++------ fs/fat/inode.c | 2 +- fs/gfs2/aops.c | 16 +++++----------- fs/gfs2/meta_io.c | 4 ++-- fs/hfs/inode.c | 4 ++-- fs/hfsplus/inode.c | 4 ++-- fs/hpfs/file.c | 2 +- fs/jfs/inode.c | 2 +- fs/minix/inode.c | 2 +- fs/mpage.c | 2 +- fs/nilfs2/mdt.c | 4 ++-- fs/ntfs/aops.c | 12 ++++++------ fs/ntfs3/inode.c | 2 +- fs/ocfs2/aops.c | 2 +- fs/omfs/file.c | 2 +- fs/reiserfs/inode.c | 14 +++++++------- fs/sysv/itree.c | 2 +- fs/udf/file.c | 2 +- fs/udf/inode.c | 2 +- fs/ufs/inode.c | 2 +- include/linux/buffer_head.h | 2 +- mm/filemap.c | 4 ++-- mm/page-writeback.c | 2 +- mm/rmap.c | 4 ++-- 32 files changed, 76 insertions(+), 85 deletions(-) (limited to 'include/linux') diff --git a/block/fops.c b/block/fops.c index 8ce1dccd15b9..796a78fd1583 100644 --- a/block/fops.c +++ b/block/fops.c @@ -429,7 +429,7 @@ static int blkdev_writepages(struct address_space *mapping, } const struct address_space_operations def_blk_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = blkdev_readpage, .readahead = blkdev_readahead, diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 5c423254895a..561bc748c04a 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -73,7 +73,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) } static const struct address_space_operations adfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = adfs_readpage, .writepage = adfs_writepage, diff --git a/fs/affs/file.c b/fs/affs/file.c index 6d4921f97162..b3f81d84ff4c 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -453,7 +453,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block) } const struct address_space_operations affs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = affs_readpage, .writepage = affs_writepage, @@ -835,7 +835,7 @@ err_bh: } const struct address_space_operations affs_aops_ofs = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = affs_readpage_ofs, //.writepage = affs_writepage_ofs, diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 2e42b82edb58..03139344568f 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -188,7 +188,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block) } const struct address_space_operations bfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = bfs_readpage, .writepage = bfs_writepage, diff --git a/fs/buffer.c b/fs/buffer.c index 5fe02e5a9807..28b9739b719b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode); * FIXME: may need to call ->reservepage here as well. That's rather up to the * address_space though. 
*/ -int __set_page_dirty_buffers(struct page *page) +bool block_dirty_folio(struct address_space *mapping, struct folio *folio) { - int newly_dirty; - struct address_space *mapping = page_mapping(page); - - if (unlikely(!mapping)) - return !TestSetPageDirty(page); + struct buffer_head *head; + bool newly_dirty; spin_lock(&mapping->private_lock); - if (page_has_buffers(page)) { - struct buffer_head *head = page_buffers(page); + head = folio_buffers(folio); + if (head) { struct buffer_head *bh = head; do { @@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page) * Lock out page's memcg migration to keep PageDirty * synchronized with per-memcg dirty page counters. */ - lock_page_memcg(page); - newly_dirty = !TestSetPageDirty(page); + folio_memcg_lock(folio); + newly_dirty = !folio_test_set_dirty(folio); spin_unlock(&mapping->private_lock); if (newly_dirty) - __set_page_dirty(page, mapping, 1); + __folio_mark_dirty(folio, mapping, 1); - unlock_page_memcg(page); + folio_memcg_unlock(folio); if (newly_dirty) __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); return newly_dirty; } -EXPORT_SYMBOL(__set_page_dirty_buffers); +EXPORT_SYMBOL(block_dirty_folio); /* * Write out and wait upon a list of buffers. @@ -1548,7 +1545,7 @@ EXPORT_SYMBOL(block_invalidate_folio); /* * We attach and possibly dirty the buffers atomically wrt - * __set_page_dirty_buffers() via private_lock. try_to_free_buffers + * block_dirty_folio() via private_lock. try_to_free_buffers * is already excluded via the page lock. */ void create_empty_buffers(struct page *page, @@ -1723,12 +1720,12 @@ int __block_write_full_page(struct inode *inode, struct page *page, (1 << BH_Dirty)|(1 << BH_Uptodate)); /* - * Be very careful. We have no exclusion from __set_page_dirty_buffers + * Be very careful. We have no exclusion from block_dirty_folio * here, and the (potentially unmapped) buffers may become dirty at * any time. If a buffer becomes dirty here after we've inspected it * then we just miss that fact, and the page stays dirty. * - * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; + * Buffers outside i_size may be dirtied by block_dirty_folio; * handle that here by just cleaning them. */ @@ -3182,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer); * * The same applies to regular filesystem pages: if all the buffers are * clean then we set the page clean and proceed. To do that, we require - * total exclusion from __set_page_dirty_buffers(). That is obtained with + * total exclusion from block_dirty_folio(). That is obtained with * private_lock. * * try_to_free_buffers() is non-blocking. @@ -3249,7 +3246,7 @@ int try_to_free_buffers(struct page *page) * the page also. * * private_lock must be held over this entire operation in order - * to synchronise against __set_page_dirty_buffers and prevent the + * to synchronise against block_dirty_folio and prevent the * dirty bit from being lost. */ if (ret) diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index bf7f35b375b7..9aabcb2f52e9 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -545,7 +545,7 @@ const struct address_space_operations ecryptfs_aops = { * feedback. 
*/ #ifdef CONFIG_BLOCK - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, #endif .writepage = ecryptfs_writepage, diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 5ed471eb973b..fc0ea1684880 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -490,7 +490,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from) } static const struct address_space_operations exfat_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = exfat_readpage, .readahead = exfat_readahead, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 9b579ee56eaf..d9452a051198 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -967,8 +967,8 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc } const struct address_space_operations ext2_aops = { - .set_page_dirty = __set_page_dirty_buffers, - .invalidate_folio = block_invalidate_folio, + .dirty_folio = block_dirty_folio, + .invalidate_folio = block_invalidate_folio, .readpage = ext2_readpage, .readahead = ext2_readahead, .writepage = ext2_writepage, @@ -983,8 +983,8 @@ const struct address_space_operations ext2_aops = { }; const struct address_space_operations ext2_nobh_aops = { - .set_page_dirty = __set_page_dirty_buffers, - .invalidate_folio = block_invalidate_folio, + .dirty_folio = block_dirty_folio, + .invalidate_folio = block_invalidate_folio, .readpage = ext2_readpage, .readahead = ext2_readahead, .writepage = ext2_nobh_writepage, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c48dbbf0e9b2..4c34104a94f0 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3560,11 +3560,11 @@ static bool ext4_journalled_dirty_folio(struct address_space *mapping, return filemap_dirty_folio(mapping, folio); } -static int ext4_set_page_dirty(struct page *page) +static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio) { - WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page)); - WARN_ON_ONCE(!page_has_buffers(page)); - return __set_page_dirty_buffers(page); + WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio)); + WARN_ON_ONCE(!folio_buffers(folio)); + return block_dirty_folio(mapping, folio); } static int ext4_iomap_swap_activate(struct swap_info_struct *sis, @@ -3581,7 +3581,7 @@ static const struct address_space_operations ext4_aops = { .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_write_end, - .set_page_dirty = ext4_set_page_dirty, + .dirty_folio = ext4_dirty_folio, .bmap = ext4_bmap, .invalidate_folio = ext4_invalidate_folio, .releasepage = ext4_releasepage, @@ -3616,7 +3616,7 @@ static const struct address_space_operations ext4_da_aops = { .writepages = ext4_writepages, .write_begin = ext4_da_write_begin, .write_end = ext4_da_write_end, - .set_page_dirty = ext4_set_page_dirty, + .dirty_folio = ext4_dirty_folio, .bmap = ext4_bmap, .invalidate_folio = ext4_invalidate_folio, .releasepage = ext4_releasepage, diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 1e2f1e24a073..86957dd07bda 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -342,7 +342,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from) } static const struct address_space_operations fat_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = fat_readpage, .readahead = fat_readahead, diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c 
index 7c096a75d703..72c9f31ce724 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -606,18 +606,12 @@ out: gfs2_trans_end(sdp); } -/** - * jdata_set_page_dirty - Page dirtying function - * @page: The page to dirty - * - * Returns: 1 if it dirtyed the page, or 0 otherwise - */ - -static int jdata_set_page_dirty(struct page *page) +static bool jdata_dirty_folio(struct address_space *mapping, + struct folio *folio) { if (current->journal_info) - SetPageChecked(page); - return __set_page_dirty_buffers(page); + folio_set_checked(folio); + return block_dirty_folio(mapping, folio); } /** @@ -795,7 +789,7 @@ static const struct address_space_operations gfs2_jdata_aops = { .writepages = gfs2_jdata_writepages, .readpage = gfs2_readpage, .readahead = gfs2_readahead, - .set_page_dirty = jdata_set_page_dirty, + .dirty_folio = jdata_dirty_folio, .bmap = gfs2_bmap, .invalidate_folio = gfs2_invalidate_folio, .releasepage = gfs2_releasepage, diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index d23c8b035447..ac4d27ccd87d 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -89,14 +89,14 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb } const struct address_space_operations gfs2_meta_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, }; const struct address_space_operations gfs2_rgrp_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 029d1869a224..55f45e9b4930 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -159,7 +159,7 @@ static int hfs_writepages(struct address_space *mapping, } const struct address_space_operations hfs_btree_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hfs_readpage, .writepage = hfs_writepage, @@ -170,7 +170,7 @@ const struct address_space_operations hfs_btree_aops = { }; const struct address_space_operations hfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hfs_readpage, .writepage = hfs_writepage, diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index a91b9b5e92a8..446a816aa8e1 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -156,7 +156,7 @@ static int hfsplus_writepages(struct address_space *mapping, } const struct address_space_operations hfsplus_btree_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hfsplus_readpage, .writepage = hfsplus_writepage, @@ -167,7 +167,7 @@ const struct address_space_operations hfsplus_btree_aops = { }; const struct address_space_operations hfsplus_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hfsplus_readpage, .writepage = hfsplus_writepage, diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index cf68f5e76ddd..99493a23c5d0 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -245,7 +245,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } const struct address_space_operations hpfs_aops = { - .set_page_dirty = 
__set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = hpfs_readpage, .writepage = hpfs_writepage, diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 3950b3d610a0..27be2e8ba237 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -357,7 +357,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } const struct address_space_operations jfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = jfs_readpage, .readahead = jfs_readahead, diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 2295804d1893..1e41fba68dcf 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -442,7 +442,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block) } static const struct address_space_operations minix_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = minix_readpage, .writepage = minix_writepage, diff --git a/fs/mpage.c b/fs/mpage.c index 87f5cfef6caa..571862da9f56 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -504,7 +504,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, if (!buffer_mapped(bh)) { /* * unmapped dirty buffers are created by - * __set_page_dirty_buffers -> mmapped data + * block_dirty_folio -> mmapped data */ if (buffer_dirty(bh)) goto confused; diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 72adca629bc9..78db33decd72 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -434,8 +434,8 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) static const struct address_space_operations def_mdt_aops = { - .set_page_dirty = __set_page_dirty_buffers, - .invalidate_folio = block_invalidate_folio, + .dirty_folio = block_dirty_folio, + .invalidate_folio = block_invalidate_folio, .writepage = nilfs_mdt_write_page, }; diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index dd71f6ac0272..d154dcfe06af 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -593,12 +593,12 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) iblock = initialized_size >> blocksize_bits; /* - * Be very careful. We have no exclusion from __set_page_dirty_buffers + * Be very careful. We have no exclusion from block_dirty_folio * here, and the (potentially unmapped) buffers may become dirty at * any time. If a buffer becomes dirty here after we've inspected it * then we just miss that fact, and the page stays dirty. * - * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; + * Buffers outside i_size may be dirtied by block_dirty_folio; * handle that here by just cleaning them. */ @@ -653,7 +653,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) // Update initialized size in the attribute and // in the inode. // Again, for each page do: - // __set_page_dirty_buffers(); + // block_dirty_folio(); // put_page() // We don't need to wait on the writes. // Update iblock. 
@@ -1654,7 +1654,7 @@ const struct address_space_operations ntfs_normal_aops = { .readpage = ntfs_readpage, #ifdef NTFS_RW .writepage = ntfs_writepage, - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, #endif /* NTFS_RW */ .bmap = ntfs_bmap, .migratepage = buffer_migrate_page, @@ -1669,7 +1669,7 @@ const struct address_space_operations ntfs_compressed_aops = { .readpage = ntfs_readpage, #ifdef NTFS_RW .writepage = ntfs_writepage, - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, #endif /* NTFS_RW */ .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, @@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) { set_buffer_dirty(bh); } while ((bh = bh->b_this_page) != head); spin_unlock(&mapping->private_lock); - __set_page_dirty_nobuffers(page); + block_dirty_folio(mapping, page_folio(page)); if (unlikely(buffers_to_free)) { do { bh = buffers_to_free->b_this_page; diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index a87ab3ad3cd3..9eab11e3b034 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -1950,7 +1950,7 @@ const struct address_space_operations ntfs_aops = { .write_end = ntfs_write_end, .direct_IO = ntfs_direct_IO, .bmap = ntfs_bmap, - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, }; const struct address_space_operations ntfs_aops_cmpr = { diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index b274061e22a7..fc890ca2e17e 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -2453,7 +2453,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } const struct address_space_operations ocfs2_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .readpage = ocfs2_readpage, .readahead = ocfs2_readahead, .writepage = ocfs2_writepage, diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 139d6a21dca1..3f297b541713 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -372,7 +372,7 @@ const struct inode_operations omfs_file_inops = { }; const struct address_space_operations omfs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = omfs_readpage, .readahead = omfs_readahead, diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index f7fa70b419d2..e4221fa85ea2 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -3201,14 +3201,14 @@ out: return; } -static int reiserfs_set_page_dirty(struct page *page) +static bool reiserfs_dirty_folio(struct address_space *mapping, + struct folio *folio) { - struct inode *inode = page->mapping->host; - if (reiserfs_file_data_log(inode)) { - SetPageChecked(page); - return __set_page_dirty_nobuffers(page); + if (reiserfs_file_data_log(mapping->host)) { + folio_set_checked(folio); + return filemap_dirty_folio(mapping, folio); } - return __set_page_dirty_buffers(page); + return block_dirty_folio(mapping, folio); } /* @@ -3435,5 +3435,5 @@ const struct address_space_operations reiserfs_address_space_operations = { .write_end = reiserfs_write_end, .bmap = reiserfs_aop_bmap, .direct_IO = reiserfs_direct_IO, - .set_page_dirty = reiserfs_set_page_dirty, + .dirty_folio = reiserfs_dirty_folio, }; diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index d39984a1d4d3..409ab5e17803 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -495,7 +495,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block) } const struct 
address_space_operations sysv_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = sysv_readpage, .writepage = sysv_writepage, diff --git a/fs/udf/file.c b/fs/udf/file.c index a91011a7bb88..0f6bf2504437 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -125,7 +125,7 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin } const struct address_space_operations udf_adinicb_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = udf_adinicb_readpage, .writepage = udf_adinicb_writepage, diff --git a/fs/udf/inode.c b/fs/udf/inode.c index ab98c7aaf9f9..ca4fa710e562 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -235,7 +235,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block) } const struct address_space_operations udf_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = udf_readpage, .readahead = udf_readahead, diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 2d005788c24d..d0dda01620f0 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -526,7 +526,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block) } const struct address_space_operations ufs_aops = { - .set_page_dirty = __set_page_dirty_buffers, + .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .readpage = ufs_readpage, .writepage = ufs_writepage, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 9ee9d003d736..bcb4fe9b8575 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -397,7 +397,7 @@ __bread(struct block_device *bdev, sector_t block, unsigned size) return __bread_gfp(bdev, block, size, __GFP_MOVABLE); } -extern int __set_page_dirty_buffers(struct page *page); +bool block_dirty_folio(struct address_space *mapping, struct folio *folio); #else /* CONFIG_BLOCK */ diff --git a/mm/filemap.c b/mm/filemap.c index 9639b844dd31..bb4e91bf5492 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -72,7 +72,7 @@ * Lock ordering: * * ->i_mmap_rwsem (truncate_pagecache) - * ->private_lock (__free_pte->__set_page_dirty_buffers) + * ->private_lock (__free_pte->block_dirty_folio) * ->swap_lock (exclusive_swap_page, others) * ->i_pages lock * @@ -115,7 +115,7 @@ * ->memcg->move_lock (page_remove_rmap->lock_page_memcg) * bdi.wb->list_lock (zap_pte_range->set_page_dirty) * ->inode->i_lock (zap_pte_range->set_page_dirty) - * ->private_lock (zap_pte_range->__set_page_dirty_buffers) + * ->private_lock (zap_pte_range->block_dirty_folio) * * ->i_mmap_rwsem * ->tasklist_lock (memory_failure, collect_procs_ao) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 27a87ae4502c..e890db239fae 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2530,7 +2530,7 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping, * This is also sometimes used by filesystems which use buffer_heads when * a single buffer is being dirtied: we want to set the folio dirty in * that case, but not all the buffers. This is a "bottom-up" dirtying, - * whereas __set_page_dirty_buffers() is a "top-down" dirtying. + * whereas block_dirty_folio() is a "top-down" dirtying. * * The caller must ensure this doesn't race with truncation. Most will * simply hold the folio lock, but e.g. 
zap_pte_range() calls with the diff --git a/mm/rmap.c b/mm/rmap.c index 6a1e8c7f6213..4f3391fa4ca9 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -31,8 +31,8 @@ * mm->page_table_lock or pte_lock * swap_lock (in swap_duplicate, swap_info_get) * mmlist_lock (in mmput, drain_mmlist and others) - * mapping->private_lock (in __set_page_dirty_buffers) - * lock_page_memcg move_lock (in __set_page_dirty_buffers) + * mapping->private_lock (in block_dirty_folio) + * folio_lock_memcg move_lock (in block_dirty_folio) * i_pages lock (widely used) * lruvec->lru_lock (in folio_lruvec_lock_irq) * inode->i_lock (in set_page_dirty's __mark_inode_dirty) -- cgit v1.2.3-71-gd317 From 46de8b979492e1377947700ecb1e3169088668b2 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:22:13 +0000 Subject: fs: Convert __set_page_dirty_no_writeback to noop_dirty_folio This is a mechanical change. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- drivers/dax/device.c | 2 +- fs/aio.c | 2 +- fs/ext2/inode.c | 2 +- fs/ext4/inode.c | 2 +- fs/fuse/dax.c | 2 +- fs/hugetlbfs/inode.c | 2 +- fs/libfs.c | 4 ++-- fs/xfs/xfs_aops.c | 2 +- include/linux/pagemap.h | 2 +- mm/page-writeback.c | 10 +++++----- mm/page_io.c | 2 +- mm/secretmem.c | 2 +- mm/shmem.c | 2 +- 13 files changed, 18 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 7a59ca51217e..5494d745ced5 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -346,7 +346,7 @@ static unsigned long dax_get_unmapped_area(struct file *filp, } static const struct address_space_operations dev_dax_aops = { - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, }; static int dax_open(struct inode *inode, struct file *filp) diff --git a/fs/aio.c b/fs/aio.c index 4ceba13a7db0..d6b7160c2a77 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -478,7 +478,7 @@ out: #endif static const struct address_space_operations aio_ctx_aops = { - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, #if IS_ENABLED(CONFIG_MIGRATION) .migratepage = aio_migratepage, #endif diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index d9452a051198..52377a0ee735 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1000,7 +1000,7 @@ const struct address_space_operations ext2_nobh_aops = { static const struct address_space_operations ext2_dax_aops = { .writepages = ext2_dax_writepages, .direct_IO = noop_direct_IO, - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, }; /* diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 4c34104a94f0..436efd31cc27 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3630,7 +3630,7 @@ static const struct address_space_operations ext4_da_aops = { static const struct address_space_operations ext4_dax_aops = { .writepages = ext4_dax_writepages, .direct_IO = noop_direct_IO, - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, .bmap = ext4_bmap, .swap_activate = ext4_iomap_swap_activate, }; diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index b11fa10b88d8..d7d3a7f06862 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -1326,7 +1326,7 @@ bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi) static const struct address_space_operations fuse_dax_file_aops = { .writepages = fuse_dax_writepages, .direct_IO = noop_direct_IO, - .set_page_dirty = 
__set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, }; static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a7c6c7498be0..f544f622598d 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1144,7 +1144,7 @@ static void hugetlbfs_destroy_inode(struct inode *inode) static const struct address_space_operations hugetlbfs_aops = { .write_begin = hugetlbfs_write_begin, .write_end = hugetlbfs_write_end, - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, .migratepage = hugetlbfs_migrate_page, .error_remove_page = hugetlbfs_error_remove_page, }; diff --git a/fs/libfs.c b/fs/libfs.c index 4e047841e61d..e64bdedef168 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -631,7 +631,7 @@ const struct address_space_operations ram_aops = { .readpage = simple_readpage, .write_begin = simple_write_begin, .write_end = simple_write_end, - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, }; EXPORT_SYMBOL(ram_aops); @@ -1220,7 +1220,7 @@ EXPORT_SYMBOL(kfree_link); struct inode *alloc_anon_inode(struct super_block *s) { static const struct address_space_operations anon_aops = { - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, }; struct inode *inode = new_inode_pseudo(s); diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 37b03675b8c3..90b7f4d127de 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -581,6 +581,6 @@ const struct address_space_operations xfs_address_space_operations = { const struct address_space_operations xfs_dax_aops = { .writepages = xfs_dax_writepages, .direct_IO = noop_direct_IO, - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, .swap_activate = xfs_iomap_swapfile_activate, }; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 6a9617e9c6bc..ab85d2856a37 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -919,7 +919,7 @@ static inline int __must_check write_one_page(struct page *page) } int __set_page_dirty_nobuffers(struct page *page); -int __set_page_dirty_no_writeback(struct page *page); +bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); void page_endio(struct page *page, bool is_write, int err); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index e890db239fae..4557a8d3dfea 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2430,13 +2430,13 @@ EXPORT_SYMBOL(folio_write_one); /* * For address_spaces which do not use buffers nor write back. */ -int __set_page_dirty_no_writeback(struct page *page) +bool noop_dirty_folio(struct address_space *mapping, struct folio *folio) { - if (!PageDirty(page)) - return !TestSetPageDirty(page); - return 0; + if (!folio_test_dirty(folio)) + return !folio_test_set_dirty(folio); + return false; } -EXPORT_SYMBOL(__set_page_dirty_no_writeback); +EXPORT_SYMBOL(noop_dirty_folio); /* * Helper function for set_page_dirty family. 
diff --git a/mm/page_io.c b/mm/page_io.c index 8f20f4dad289..e3333973335e 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -453,6 +453,6 @@ bool swap_dirty_folio(struct address_space *mapping, struct folio *folio) return aops->dirty_folio(mapping, folio); return aops->set_page_dirty(&folio->page); } else { - return __set_page_dirty_no_writeback(&folio->page); + return noop_dirty_folio(mapping, folio); } } diff --git a/mm/secretmem.c b/mm/secretmem.c index 22b310adb53d..098638d3b8a4 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -152,7 +152,7 @@ static void secretmem_freepage(struct page *page) } const struct address_space_operations secretmem_aops = { - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, .freepage = secretmem_freepage, .migratepage = secretmem_migratepage, .isolate_page = secretmem_isolate_page, diff --git a/mm/shmem.c b/mm/shmem.c index a09b29ec2b45..5944159bc43e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3753,7 +3753,7 @@ static int shmem_error_remove_page(struct address_space *mapping, const struct address_space_operations shmem_aops = { .writepage = shmem_writepage, - .set_page_dirty = __set_page_dirty_no_writeback, + .dirty_folio = noop_dirty_folio, #ifdef CONFIG_TMPFS .write_begin = shmem_write_begin, .write_end = shmem_write_end, -- cgit v1.2.3-71-gd317 From 3a3bae50af5d73fab5da20484029de77ca67bb2e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 9 Feb 2022 20:22:15 +0000 Subject: fs: Remove aops ->set_page_dirty With all implementations converted to ->dirty_folio, we can stop calling this fallback method and remove it entirely. Signed-off-by: Matthew Wilcox (Oracle) Tested-by: Damien Le Moal Acked-by: Damien Le Moal Tested-by: Mike Marshall # orangefs Tested-by: David Howells # afs --- fs/ecryptfs/mmap.c | 2 +- include/linux/fs.h | 3 +-- mm/page-writeback.c | 11 +++-------- mm/page_io.c | 4 +--- 4 files changed, 6 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 9aabcb2f52e9..9ad61b582f07 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -540,7 +540,7 @@ const struct address_space_operations ecryptfs_aops = { * XXX: This is pretty broken for multiple reasons: ecryptfs does not * actually use buffer_heads, and ecryptfs will crash without * CONFIG_BLOCK. But it matches the behavior before the default for - * address_space_operations without the ->set_page_dirty method was + * address_space_operations without the ->dirty_folio method was * cleaned up, so this is the best we can do without maintainer * feedback. */ diff --git a/include/linux/fs.h b/include/linux/fs.h index c3d5db8851ae..b472d78f00b0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -367,8 +367,7 @@ struct address_space_operations { /* Write back some dirty pages from this mapping. */ int (*writepages)(struct address_space *, struct writeback_control *); - /* Set a page dirty. Return true if this dirtied it */ - int (*set_page_dirty)(struct page *page); + /* Mark a folio dirty. 
Return true if this dirtied it */ bool (*dirty_folio)(struct address_space *, struct folio *); /* diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 4557a8d3dfea..0997738545dd 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2641,15 +2641,10 @@ bool folio_mark_dirty(struct folio *folio) */ if (folio_test_reclaim(folio)) folio_clear_reclaim(folio); - if (mapping->a_ops->dirty_folio) - return mapping->a_ops->dirty_folio(mapping, folio); - return mapping->a_ops->set_page_dirty(&folio->page); + return mapping->a_ops->dirty_folio(mapping, folio); } - if (!folio_test_dirty(folio)) { - if (!folio_test_set_dirty(folio)) - return true; - } - return false; + + return noop_dirty_folio(mapping, folio); } EXPORT_SYMBOL(folio_mark_dirty); diff --git a/mm/page_io.c b/mm/page_io.c index e3333973335e..6bde053b9d9d 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -449,9 +449,7 @@ bool swap_dirty_folio(struct address_space *mapping, struct folio *folio) aops = mapping->a_ops; VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio); - if (aops->dirty_folio) - return aops->dirty_folio(mapping, folio); - return aops->set_page_dirty(&folio->page); + return aops->dirty_folio(mapping, folio); } else { return noop_dirty_folio(mapping, folio); } -- cgit v1.2.3-71-gd317 From f6c46b1d62f8ffbf2cf6eb43ab0d277bd3f7e948 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 11 Mar 2022 19:20:17 +0000 Subject: PM: hibernate: Honour ACPI hardware signature by default for virtual guests The ACPI specification says that OSPM should refuse to restore from hibernate if the hardware signature changes, and should boot from scratch. However, real BIOSes often vary the hardware signature in cases where we *do* want to resume from hibernate, so Linux doesn't follow the spec by default. However, in a virtual environment there's no reason for the VMM to vary the hardware signature *unless* it wants to trigger a clean reboot as defined by the ACPI spec. So enable the check by default if a hypervisor is detected. Signed-off-by: David Woodhouse Signed-off-by: Rafael J. Wysocki --- arch/x86/kernel/acpi/sleep.c | 23 +++++++++++++++++++++-- drivers/acpi/sleep.c | 11 +++-------- include/linux/acpi.h | 2 +- 3 files changed, 25 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 1e97f944b47d..3b7f4cdbf2e0 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "../../realmode/rm/wakeup.h" @@ -140,9 +141,9 @@ static int __init acpi_sleep_setup(char *str) acpi_realmode_flags |= 4; #ifdef CONFIG_HIBERNATION if (strncmp(str, "s4_hwsig", 8) == 0) - acpi_check_s4_hw_signature(1); + acpi_check_s4_hw_signature = 1; if (strncmp(str, "s4_nohwsig", 10) == 0) - acpi_check_s4_hw_signature(0); + acpi_check_s4_hw_signature = 0; #endif if (strncmp(str, "nonvs", 5) == 0) acpi_nvs_nosave(); @@ -160,3 +161,21 @@ static int __init acpi_sleep_setup(char *str) } __setup("acpi_sleep=", acpi_sleep_setup); + +#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HYPERVISOR_GUEST) +static int __init init_s4_sigcheck(void) +{ + /* + * If running on a hypervisor, honour the ACPI specification + * by default and trigger a clean reboot when the hardware + * signature in FACS is changed after hibernation. 
+ */ + if (acpi_check_s4_hw_signature == -1 && + !hypervisor_is_type(X86_HYPER_NATIVE)) + acpi_check_s4_hw_signature = 1; + + return 0; +} +/* This must happen before acpi_init() which is a subsys initcall */ +arch_initcall(init_s4_sigcheck); +#endif diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index d4fbea91ab6b..74e54fd44b8e 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -869,12 +869,7 @@ static inline void acpi_sleep_syscore_init(void) {} #ifdef CONFIG_HIBERNATION static unsigned long s4_hardware_signature; static struct acpi_table_facs *facs; -static int sigcheck = -1; /* Default behaviour is just to warn */ - -void __init acpi_check_s4_hw_signature(int check) -{ - sigcheck = check; -} +int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */ static int acpi_hibernation_begin(pm_message_t stage) { @@ -999,7 +994,7 @@ static void acpi_sleep_hibernate_setup(void) hibernation_set_ops(old_suspend_ordering ? &acpi_hibernation_ops_old : &acpi_hibernation_ops); sleep_states[ACPI_STATE_S4] = 1; - if (!sigcheck) + if (!acpi_check_s4_hw_signature) return; acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs); @@ -1011,7 +1006,7 @@ static void acpi_sleep_hibernate_setup(void) */ s4_hardware_signature = facs->hardware_signature; - if (sigcheck > 0) { + if (acpi_check_s4_hw_signature > 0) { /* * If we're actually obeying the ACPI specification * then the signature is written out as part of the diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6274758648e3..766dbcb82df1 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -526,7 +526,7 @@ acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, int acpi_resources_are_enforced(void); #ifdef CONFIG_HIBERNATION -void __init acpi_check_s4_hw_signature(int check); +extern int acpi_check_s4_hw_signature; #endif #ifdef CONFIG_PM_SLEEP -- cgit v1.2.3-71-gd317 From d2a3b7c5becc3992f8e7d2b9bf5eacceeedb9a48 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Wed, 9 Mar 2022 20:33:20 +0800 Subject: bpf: Fix net.core.bpf_jit_harden race It is the bpf_jit_harden counterpart to commit 60b58afc96c9 ("bpf: fix net.core.bpf_jit_enable race"). bpf_jit_harden will be tested twice for each subprog if there are subprogs in the bpf program, and constant blinding may increase the length of the program; so when running "./test_progs -t subprogs" and toggling bpf_jit_harden between 0 and 2, jit_subprogs may fail because constant blinding increases the length of subprog instructions during extra passes. So cache the value of bpf_jit_blinding_enabled() during program allocation, and use the cached value during constant blinding, subprog JITing and args tracking of tail call. Signed-off-by: Hou Tao Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220309123321.2400262-4-houtao1@huawei.com --- include/linux/filter.h | 1 + kernel/bpf/core.c | 3 ++- kernel/bpf/verifier.c | 5 +++-- 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 05ed9bd31b45..ed0c0ff42ad5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -566,6 +566,7 @@ struct bpf_prog { gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? */ + blinding_requested:1, /* needs constant blinding */ blinded:1, /* Was blinded */ is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe?
*/ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ab630f773ec1..1324f9523e7c 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -105,6 +105,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag fp->aux = aux; fp->aux->prog = fp; fp->jit_requested = ebpf_jit_enabled(); + fp->blinding_requested = bpf_jit_blinding_enabled(fp); INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); mutex_init(&fp->aux->used_maps_mutex); @@ -1382,7 +1383,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) struct bpf_insn *insn; int i, rewritten; - if (!bpf_jit_blinding_enabled(prog) || prog->blinded) + if (!prog->blinding_requested || prog->blinded) return prog; clone = bpf_prog_clone_create(prog, GFP_USER); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0db6cd8dcb35..cf92f9c01556 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13023,6 +13023,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) func[i]->aux->name[0] = 'F'; func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; func[i]->jit_requested = 1; + func[i]->blinding_requested = prog->blinding_requested; func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; func[i]->aux->linfo = prog->aux->linfo; @@ -13146,6 +13147,7 @@ out_free: out_undo_insn: /* cleanup main prog to be interpreted */ prog->jit_requested = 0; + prog->blinding_requested = 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (!bpf_pseudo_call(insn)) continue; @@ -13239,7 +13241,6 @@ static int do_misc_fixups(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; enum bpf_attach_type eatype = prog->expected_attach_type; - bool expect_blinding = bpf_jit_blinding_enabled(prog); enum bpf_prog_type prog_type = resolve_prog_type(prog); struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; @@ -13403,7 +13404,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) insn->code = BPF_JMP | BPF_TAIL_CALL; aux = &env->insn_aux_data[i + delta]; - if (env->bpf_capable && !expect_blinding && + if (env->bpf_capable && !prog->blinding_requested && prog->jit_requested && !bpf_map_key_poisoned(aux) && !bpf_map_ptr_poisoned(aux) && -- cgit v1.2.3-71-gd317 From 4ee06de7729d795773145692e246a06448b1eb7a Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 15 Mar 2022 10:20:08 +0100 Subject: net: handle ARPHRD_PIMREG in dev_is_mac_header_xmit() This kind of interface doesn't have a mac header. This patch fixes bpf_redirect() to a PIM interface. Fixes: 27b29f63058d ("bpf: add bpf_redirect() helper") Signed-off-by: Nicolas Dichtel Link: https://lore.kernel.org/r/20220315092008.31423-1-nicolas.dichtel@6wind.com Signed-off-by: Jakub Kicinski --- include/linux/if_arp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index b712217f7030..1ed52441972f 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -52,6 +52,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) case ARPHRD_VOID: case ARPHRD_NONE: case ARPHRD_RAWIP: + case ARPHRD_PIMREG: return false; default: return true; -- cgit v1.2.3-71-gd317 From c42fa24b44751c62c86e98430ef915c0609a2ab8 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 16 Mar 2022 13:39:03 +0100 Subject: ACPI: bus: Avoid using CPPC if not supported by firmware If the platform firmware indicates that it does not support CPPC by clearing the OSC_SB_CPC_SUPPORT and OSC_SB_CPCV2_SUPPORT bits in the platform _OSC capabilities mask, avoid attempting to evaluate _CPC which may fail in that case. Because the OSC_SB_CPC_SUPPORT and OSC_SB_CPCV2_SUPPORT bits are only added to the supported platform capabilities mask on x86, when X86_FEATURE_HWP is supported, allow _CPC to be evaluated regardless in the other cases. Link: https://lore.kernel.org/linux-acpi/CAJZ5v0i=ecAksq0TV+iLVObm-=fUfdqPABzzkgm9K6KxO1ZCcg@mail.gmail.com Signed-off-by: Rafael J. Wysocki Tested-by: Mario Limonciello Acked-by: Huang Rui Reviewed-by: Mika Westerberg --- drivers/acpi/bus.c | 8 ++++++++ drivers/acpi/cppc_acpi.c | 3 +++ include/linux/acpi.h | 1 + 3 files changed, 12 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index c7670d441ad9..8aa7860602e7 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -283,6 +283,8 @@ EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed); bool osc_sb_native_usb4_support_confirmed; EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed); +bool osc_sb_cppc_not_supported; + static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; static void acpi_bus_osc_negotiate_platform_control(void) { @@ -338,6 +340,12 @@ static void acpi_bus_osc_negotiate_platform_control(void) return; } +#ifdef CONFIG_X86 + if (boot_cpu_has(X86_FEATURE_HWP)) + osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] & + (OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT)); +#endif + /* * Now run _OSC again with query flag clear and with the caps * supported by both the OS and the platform. diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 866560cbb082..3e89ad246b60 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -656,6 +656,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) acpi_status status; int ret = -EFAULT; + if (osc_sb_cppc_not_supported) + return -ENODEV; + /* Parse the ACPI _CPC table for this CPU. */ status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output, ACPI_TYPE_PACKAGE); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6274758648e3..690b0533104a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -580,6 +580,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; extern bool osc_sb_native_usb4_support_confirmed; +extern bool osc_sb_cppc_not_supported; /* USB4 Capabilities */ #define OSC_USB_USB3_TUNNELING 0x00000001 -- cgit v1.2.3-71-gd317 From 20e1d6402a71dba7ad2b81f332a3c14c7d3b939b Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Thu, 17 Mar 2022 09:14:42 -0500 Subject: ACPI / x86: Add support for LPS0 callback handler Currenty the latest thing run during a suspend to idle attempt is the LPS0 `prepare_late` callback and the earliest thing is the `resume_early` callback. There is a desire for the `amd-pmc` driver to suspend later in the suspend process (ideally the very last thing), so create a callback that it or any other driver can hook into to do this. Signed-off-by: Mario Limonciello Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20220317141445.6498-1-mario.limonciello@amd.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/acpi/x86/s2idle.c | 40 ++++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 10 +++++++++- 2 files changed, 49 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index abc06e7f89d8..031b20a547f9 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -86,6 +86,8 @@ struct lpi_device_constraint_amd { int min_dstate; }; +static LIST_HEAD(lps0_s2idle_devops_head); + static struct lpi_constraints *lpi_constraints_table; static int lpi_constraints_table_size; static int rev_id; @@ -444,6 +446,8 @@ static struct acpi_scan_handler lps0_handler = { int acpi_s2idle_prepare_late(void) { + struct acpi_s2idle_dev_ops *handler; + if (!lps0_device_handle || sleep_no_lps0) return 0; @@ -474,14 +478,26 @@ int acpi_s2idle_prepare_late(void) acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); } + + list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) { + if (handler->prepare) + handler->prepare(); + } + return 0; } void acpi_s2idle_restore_early(void) { + struct acpi_s2idle_dev_ops *handler; + if (!lps0_device_handle || sleep_no_lps0) return; + list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) + if (handler->restore) + handler->restore(); + /* Modern standby exit */ if (lps0_dsm_func_mask_microsoft > 0) acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, @@ -524,4 +540,28 @@ void acpi_s2idle_setup(void) s2idle_set_ops(&acpi_s2idle_ops_lps0); } +int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg) +{ + if (!lps0_device_handle || sleep_no_lps0) + return -ENODEV; + + lock_system_sleep(); + list_add(&arg->list_node, &lps0_s2idle_devops_head); + unlock_system_sleep(); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_register_lps0_dev); + +void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg) +{ + if (!lps0_device_handle || sleep_no_lps0) + return; + + lock_system_sleep(); + list_del(&arg->list_node); + unlock_system_sleep(); +} +EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev); + #endif /* CONFIG_SUSPEND */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6274758648e3..47c16cdc8e0e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1023,7 +1023,15 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); - +#ifdef CONFIG_X86 +struct acpi_s2idle_dev_ops { + struct list_head list_node; + void (*prepare)(void); + void (*restore)(void); +}; +int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg); +void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg); +#endif /* CONFIG_X86 */ #ifndef CONFIG_IA64 void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else -- cgit v1.2.3-71-gd317 From 4206fe40b2c01fb5988980cff6ea958b16578026 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 27 Apr 2021 16:30:32 +0300 Subject: net/mlx5: Remove unused exported contiguous coherent buffer allocation API All WQ types moved to using the fragmented allocation API for coherent memory. Contiguous API is not used anymore. Remove it, reduce the number of exported functions. 
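For reference, a minimal sketch of the fragmented API that the WQ code now uses instead; this is a hypothetical wrapper under stated assumptions, not code from this patch, with error handling trimmed:

	/* Hypothetical helper: allocate coherent memory through the
	 * fragmented API. Each buf->frags[i] entry carries one fragment's
	 * CPU address (.buf) and DMA address (.map).
	 */
	static int foo_alloc_wq_buf(struct mlx5_core_dev *dev, int size,
				    struct mlx5_frag_buf *buf)
	{
		int err;

		err = mlx5_frag_buf_alloc_node(dev, size, buf,
					       dev->priv.numa_node);
		if (err)
			return err;

		/* ... hand buf->frags[i].buf / buf->frags[i].map to the WQ ... */
		return 0;
	}

	/* Hypothetical counterpart; pairs with the allocation above. */
	static void foo_free_wq_buf(struct mlx5_core_dev *dev,
				    struct mlx5_frag_buf *buf)
	{
		mlx5_frag_buf_free(dev, buf);
	}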
Signed-off-by: Tariq Toukan Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 47 ------------------------- include/linux/mlx5/driver.h | 3 -- 2 files changed, 50 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index d5408f6ce5a7..1762c5c36042 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -71,53 +71,6 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, return cpu_handle; } -static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, - struct mlx5_frag_buf *buf, int node) -{ - dma_addr_t t; - - buf->size = size; - buf->npages = 1; - buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; - - buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL); - if (!buf->frags) - return -ENOMEM; - - buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size, - &t, node); - if (!buf->frags->buf) - goto err_out; - - buf->frags->map = t; - - while (t & ((1 << buf->page_shift) - 1)) { - --buf->page_shift; - buf->npages *= 2; - } - - return 0; -err_out: - kfree(buf->frags); - return -ENOMEM; -} - -int mlx5_buf_alloc(struct mlx5_core_dev *dev, - int size, struct mlx5_frag_buf *buf) -{ - return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node); -} -EXPORT_SYMBOL(mlx5_buf_alloc); - -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) -{ - dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf, - buf->frags->map); - - kfree(buf->frags); -} -EXPORT_SYMBOL_GPL(mlx5_buf_free); - int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node) { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 00a914b0716e..a386aec1eb65 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1009,9 +1009,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); -int mlx5_buf_alloc(struct mlx5_core_dev *dev, - int size, struct mlx5_frag_buf *buf); -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); -- cgit v1.2.3-71-gd317 From 770c9a3a01af178a90368a78c75eb91707c7233c Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 20 May 2021 15:34:57 +0300 Subject: net/mlx5: Remove unused fill page array API function mlx5_fill_page_array API function is not used. Remove it, reduce the number of exported functions. 
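To illustrate the surviving helper (a hedged sketch, not part of this patch; the surrounding command-buffer setup is assumed and elided), callers populate a firmware PAS array from a fragmented buffer like so:

	/* Hypothetical use: write the DMA addresses of a fragmented buffer
	 * into the physical-address (PAS) array of a create command, using
	 * the fragment-aware helper that remains after this removal.
	 */
	static void foo_set_create_cmd_pas(struct mlx5_frag_buf *buf, __be64 *pas)
	{
		mlx5_fill_page_frag_array(buf, pas);
	}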
Signed-off-by: Tariq Toukan Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 13 ------------- include/linux/mlx5/driver.h | 1 - 2 files changed, 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 1762c5c36042..e52b0bac09da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -239,19 +239,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) } EXPORT_SYMBOL_GPL(mlx5_db_free); -void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas) -{ - u64 addr; - int i; - - for (i = 0; i < buf->npages; i++) { - addr = buf->frags->map + (i << buf->page_shift); - - pas[i] = cpu_to_be64(addr); - } -} -EXPORT_SYMBOL_GPL(mlx5_fill_page_array); - void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm) { int i; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a386aec1eb65..96cd740d94a3 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1036,7 +1036,6 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); -void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); -- cgit v1.2.3-71-gd317 From cceac97afa090284b3ceecd93ea6b7b527116767 Mon Sep 17 00:00:00 2001 From: Tobias Waldekranz Date: Wed, 16 Mar 2022 16:08:49 +0100 Subject: net: bridge: mst: Add helper to map an MSTI to a VID set br_mst_get_info answers the question: "On this bridge, which VIDs are mapped to the given MSTI?" This is useful in switchdev drivers, which might have to fan out operations relating to an MSTI per VLAN. An example: When a port's MST state changes from forwarding to blocking, a driver may choose to flush the dynamic FDB entries on that port to get faster reconvergence of the network, but this should only be done in the VLANs that are managed by the MSTI in question, as in the sketch below.
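A minimal sketch of that fan-out, assuming a hypothetical driver helper foo_fast_age_vlan() and a caller that holds rtnl (the on-stack VID bitmap is kept only for brevity):

	static int foo_fast_age_msti(struct net_device *br_dev, int port, u16 msti)
	{
		DECLARE_BITMAP(vids, VLAN_N_VID) = { 0 };
		int vid, err;

		/* br_mst_get_info() asserts rtnl and fills the VID bitmap */
		err = br_mst_get_info(br_dev, msti, vids);
		if (err)
			return err;

		/* flush dynamic FDB entries only in the MSTI's VLANs */
		for_each_set_bit(vid, vids, VLAN_N_VID)
			foo_fast_age_vlan(port, vid); /* hypothetical */

		return 0;
	}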
Signed-off-by: Tobias Waldekranz Reviewed-by: Vladimir Oltean Acked-by: Nikolay Aleksandrov Signed-off-by: Jakub Kicinski --- include/linux/if_bridge.h | 7 +++++++ net/bridge/br_mst.c | 26 ++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) (limited to 'include/linux') diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 3aae023a9353..1cf0cc46d90d 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -119,6 +119,7 @@ int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); +int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids); #else static inline bool br_vlan_enabled(const struct net_device *dev) { @@ -151,6 +152,12 @@ static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, { return -EINVAL; } + +static inline int br_mst_get_info(const struct net_device *dev, u16 msti, + unsigned long *vids) +{ + return -EINVAL; +} #endif #if IS_ENABLED(CONFIG_BRIDGE) diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c index 00935a19afcc..00b36e629224 100644 --- a/net/bridge/br_mst.c +++ b/net/bridge/br_mst.c @@ -13,6 +13,32 @@ DEFINE_STATIC_KEY_FALSE(br_mst_used); +int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids) +{ + const struct net_bridge_vlan_group *vg; + const struct net_bridge_vlan *v; + const struct net_bridge *br; + + ASSERT_RTNL(); + + if (!netif_is_bridge_master(dev)) + return -EINVAL; + + br = netdev_priv(dev); + if (!br_opt_get(br, BROPT_MST_ENABLED)) + return -EINVAL; + + vg = br_vlan_group(br); + + list_for_each_entry(v, &vg->vlan_list, vlist) { + if (v->msti == msti) + __set_bit(v->vid, vids); + } + + return 0; +} +EXPORT_SYMBOL_GPL(br_mst_get_info); + static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v, u8 state) { -- cgit v1.2.3-71-gd317 From 48d57b2e5f439246c4e12ed7705a5e3241294b03 Mon Sep 17 00:00:00 2001 From: Tobias Waldekranz Date: Wed, 16 Mar 2022 16:08:50 +0100 Subject: net: bridge: mst: Add helper to check if MST is enabled This is useful for switchdev drivers that might want to refuse to join a bridge where MST is enabled, if the hardware can't support it. 
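As a sketch of that use (the join handler and its types are hypothetical, not part of this patch):

	/* Refuse to offload a bridge whose MST mode the hardware can't honour. */
	static int foo_port_bridge_join(struct foo_port *p, struct net_device *br_dev)
	{
		if (br_mst_enabled(br_dev))
			return -EOPNOTSUPP; /* fall back to software bridging */

		return foo_port_bridge_join_common(p, br_dev); /* hypothetical */
	}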
Signed-off-by: Tobias Waldekranz Reviewed-by: Vladimir Oltean Acked-by: Nikolay Aleksandrov Signed-off-by: Jakub Kicinski --- include/linux/if_bridge.h | 6 ++++++ net/bridge/br_mst.c | 9 +++++++++ 2 files changed, 15 insertions(+) (limited to 'include/linux') diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 1cf0cc46d90d..4efd5540279a 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -119,6 +119,7 @@ int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); +bool br_mst_enabled(const struct net_device *dev); int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids); #else static inline bool br_vlan_enabled(const struct net_device *dev) @@ -153,6 +154,11 @@ static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, return -EINVAL; } +static inline bool br_mst_enabled(const struct net_device *dev) +{ + return false; +} + static inline int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids) { diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c index 00b36e629224..830a5746479f 100644 --- a/net/bridge/br_mst.c +++ b/net/bridge/br_mst.c @@ -13,6 +13,15 @@ DEFINE_STATIC_KEY_FALSE(br_mst_used); +bool br_mst_enabled(const struct net_device *dev) +{ + if (!netif_is_bridge_master(dev)) + return false; + + return br_opt_get(netdev_priv(dev), BROPT_MST_ENABLED); +} +EXPORT_SYMBOL_GPL(br_mst_enabled); + int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids) { const struct net_bridge_vlan_group *vg; -- cgit v1.2.3-71-gd317 From f54fd0e163068ced4d0d27db64cd3cbda95b74e4 Mon Sep 17 00:00:00 2001 From: Tobias Waldekranz Date: Wed, 16 Mar 2022 16:08:51 +0100 Subject: net: bridge: mst: Add helper to query a port's MST state This is useful for switchdev drivers who are offloading MST states into hardware. As an example, a driver may wish to flush the FDB for a port when it transitions from forwarding to blocking - which means that the previous state must be discoverable. 
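A hedged sketch of that pattern (the driver helpers are hypothetical; port_dev is the bridge port netdev being reconfigured, and the caller holds rtnl):

	static int foo_port_mst_state_set(struct net_device *port_dev,
					  u16 msti, u8 new_state)
	{
		u8 old_state;
		int err;

		err = br_mst_get_state(port_dev, msti, &old_state);
		if (err)
			return err;

		/* flush only on a forwarding -> blocking transition */
		if (old_state == BR_STATE_FORWARDING &&
		    new_state == BR_STATE_BLOCKING)
			foo_port_fast_age_msti(port_dev, msti); /* hypothetical */

		return foo_port_hw_set_mst_state(port_dev, msti, new_state); /* hypothetical */
	}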
Signed-off-by: Tobias Waldekranz Reviewed-by: Vladimir Oltean Acked-by: Nikolay Aleksandrov Signed-off-by: Jakub Kicinski --- include/linux/if_bridge.h | 6 ++++++ net/bridge/br_mst.c | 25 +++++++++++++++++++++++++ 2 files changed, 31 insertions(+) (limited to 'include/linux') diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 4efd5540279a..d62ef428e3aa 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -121,6 +121,7 @@ int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); bool br_mst_enabled(const struct net_device *dev); int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids); +int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state); #else static inline bool br_vlan_enabled(const struct net_device *dev) { @@ -164,6 +165,11 @@ static inline int br_mst_get_info(const struct net_device *dev, u16 msti, { return -EINVAL; } +static inline int br_mst_get_state(const struct net_device *dev, u16 msti, + u8 *state) +{ + return -EINVAL; +} #endif #if IS_ENABLED(CONFIG_BRIDGE) diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c index 830a5746479f..ee680adcee17 100644 --- a/net/bridge/br_mst.c +++ b/net/bridge/br_mst.c @@ -48,6 +48,31 @@ int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids) } EXPORT_SYMBOL_GPL(br_mst_get_info); +int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state) +{ + const struct net_bridge_port *p = NULL; + const struct net_bridge_vlan_group *vg; + const struct net_bridge_vlan *v; + + ASSERT_RTNL(); + + p = br_port_get_check_rtnl(dev); + if (!p || !br_opt_get(p->br, BROPT_MST_ENABLED)) + return -EINVAL; + + vg = nbp_vlan_group(p); + + list_for_each_entry(v, &vg->vlan_list, vlist) { + if (v->brvlan->msti == msti) { + *state = v->state; + return 0; + } + } + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(br_mst_get_state); + static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v, u8 state) { -- cgit v1.2.3-71-gd317 From 4f554e955614f19425cee86de4669351741a6280 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 15 Mar 2022 23:00:26 +0900 Subject: ftrace: Add ftrace_set_filter_ips function Adding an ftrace_set_filter_ips function to be able to set a filter on multiple ip addresses at once. With the kprobe multi attach interface we have cases where we need to initialize an ftrace_ops object with thousands of functions, so having a single function diving into ftrace_hash_move_and_update_ops with ftrace_lock is faster. The function ips are passed as an unsigned long array together with a count; a usage sketch follows below.
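The sketch below shows the intended batched use (my_ops, my_handler and the resolved address array are placeholders, not part of this patch; it assumes a kernel with DYNAMIC_FTRACE_WITH_REGS):

	static void my_handler(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *ops, struct ftrace_regs *fregs)
	{
		/* called on entry of every filtered function */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_handler,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};

	static int my_attach(unsigned long *my_ips, unsigned int cnt)
	{
		/* remove=0, reset=1: install all addresses with one hash update */
		int err = ftrace_set_filter_ips(&my_ops, my_ips, cnt, 0, 1);

		if (err)
			return err;
		return register_ftrace_function(&my_ops);
	}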
Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735282673.1084943.18310504594134769804.stgit@devnote2 --- include/linux/ftrace.h | 3 +++ kernel/trace/ftrace.c | 58 ++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 52 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9999e29187de..60847cbce0da 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -512,6 +512,8 @@ struct dyn_ftrace { int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset); +int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, + unsigned int cnt, int remove, int reset); int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, @@ -802,6 +804,7 @@ static inline unsigned long ftrace_location(unsigned long ip) #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) +#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; }) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_free_filter(ops) do { } while (0) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index a4b462b6f944..93e992962ada 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -4958,7 +4958,7 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, } static int -ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) +__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) { struct ftrace_func_entry *entry; @@ -4976,9 +4976,30 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) return add_hash_entry(hash, ip); } +static int +ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, + unsigned int cnt, int remove) +{ + unsigned int i; + int err; + + for (i = 0; i < cnt; i++) { + err = __ftrace_match_addr(hash, ips[i], remove); + if (err) { + /* + * This expects the @hash is a temporary hash and if this + * fails the caller must free the @hash. 
+ */ + return err; + } + } + return 0; +} + static int ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, - unsigned long ip, int remove, int reset, int enable) + unsigned long *ips, unsigned int cnt, + int remove, int reset, int enable) { struct ftrace_hash **orig_hash; struct ftrace_hash *hash; @@ -5008,8 +5029,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, ret = -EINVAL; goto out_regex_unlock; } - if (ip) { - ret = ftrace_match_addr(hash, ip, remove); + if (ips) { + ret = ftrace_match_addr(hash, ips, cnt, remove); if (ret < 0) goto out_regex_unlock; } @@ -5026,10 +5047,10 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, } static int -ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, - int reset, int enable) +ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt, + int remove, int reset, int enable) { - return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); + return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable); } #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS @@ -5634,10 +5655,29 @@ int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset) { ftrace_ops_init(ops); - return ftrace_set_addr(ops, ip, remove, reset, 1); + return ftrace_set_addr(ops, &ip, 1, remove, reset, 1); } EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); +/** + * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses + * @ops - the ops to set the filter with + * @ips - the array of addresses to add to or remove from the filter. + * @cnt - the number of addresses in @ips + * @remove - non zero to remove ips from the filter + * @reset - non zero to reset all filters before applying this filter. + * + * Filters denote which functions should be enabled when tracing is enabled + * If @ips array or any ip specified within is NULL , it fails to update filter. + */ +int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, + unsigned int cnt, int remove, int reset) +{ + ftrace_ops_init(ops); + return ftrace_set_addr(ops, ips, cnt, remove, reset, 1); +} +EXPORT_SYMBOL_GPL(ftrace_set_filter_ips); + /** * ftrace_ops_set_global_filter - setup ops to use global filters * @ops - the ops which will use the global filters @@ -5659,7 +5699,7 @@ static int ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, int reset, int enable) { - return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); + return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable); } /** -- cgit v1.2.3-71-gd317 From cad9931f64dc7f5dbdec12cae9f30063360f9855 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:00:38 +0900 Subject: fprobe: Add ftrace based probe APIs The fprobe is a wrapper API for ftrace function tracer. Unlike kprobes, this probes only supports the function entry, but this can probe multiple functions by one fprobe. The usage is similar, user will set their callback to fprobe::entry_handler and call register_fprobe*() with probed functions. There are 3 registration interfaces, - register_fprobe() takes filtering patterns of the functin names. - register_fprobe_ips() takes an array of ftrace-location addresses. - register_fprobe_syms() takes an array of function names. The registered fprobes can be unregistered with unregister_fprobe(). e.g. struct fprobe fp = { .entry_handler = user_handler }; const char *targets[] = { "func1", "func2", "func3"}; ... ret = register_fprobe_syms(&fp, targets, ARRAY_SIZE(targets)); ... 
unregister_fprobe(&fp); Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735283857.1084943.1154436951479395551.stgit@devnote2 --- include/linux/fprobe.h | 87 ++++++++++++++++++++ kernel/trace/Kconfig | 12 +++ kernel/trace/Makefile | 1 + kernel/trace/fprobe.c | 211 +++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 311 insertions(+) create mode 100644 include/linux/fprobe.h create mode 100644 kernel/trace/fprobe.c (limited to 'include/linux') diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h new file mode 100644 index 000000000000..2ba099aff041 --- /dev/null +++ b/include/linux/fprobe.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Simple ftrace probe wrapper */ +#ifndef _LINUX_FPROBE_H +#define _LINUX_FPROBE_H + +#include +#include + +/** + * struct fprobe - ftrace based probe. + * @ops: The ftrace_ops. + * @nmissed: The counter for missing events. + * @flags: The status flag. + * @entry_handler: The callback function for function entry. + */ +struct fprobe { +#ifdef CONFIG_FUNCTION_TRACER + /* + * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too. + * But a user of fprobe may keep embedding the struct fprobe in their own + * code. To avoid build errors, this will keep the fprobe data structure + * defined here, but remove the ftrace_ops data structure. + */ + struct ftrace_ops ops; +#endif + unsigned long nmissed; + unsigned int flags; + void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); +}; + +#define FPROBE_FL_DISABLED 1 + +static inline bool fprobe_disabled(struct fprobe *fp) +{ + return (fp) ? fp->flags & FPROBE_FL_DISABLED : false; +} + +#ifdef CONFIG_FPROBE +int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter); +int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num); +int register_fprobe_syms(struct fprobe *fp, const char **syms, int num); +int unregister_fprobe(struct fprobe *fp); +#else +static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) +{ + return -EOPNOTSUPP; +} +static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num) +{ + return -EOPNOTSUPP; +} +static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num) +{ + return -EOPNOTSUPP; +} +static inline int unregister_fprobe(struct fprobe *fp) +{ + return -EOPNOTSUPP; +} +#endif + +/** + * disable_fprobe() - Disable fprobe + * @fp: The fprobe to be disabled. + * + * This will soft-disable @fp. Note that this doesn't remove the ftrace + * hooks from the function entry. + */ +static inline void disable_fprobe(struct fprobe *fp) +{ + if (fp) + fp->flags |= FPROBE_FL_DISABLED; +} + +/** + * enable_fprobe() - Enable fprobe + * @fp: The fprobe to be enabled. + * + * This will soft-enable @fp.
+ */ +static inline void enable_fprobe(struct fprobe *fp) +{ + if (fp) + fp->flags &= ~FPROBE_FL_DISABLED; +} + +#endif diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index a5eb5e7fd624..7ce31abc542b 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -236,6 +236,18 @@ config DYNAMIC_FTRACE_WITH_ARGS depends on DYNAMIC_FTRACE depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS +config FPROBE + bool "Kernel Function Probe (fprobe)" + depends on FUNCTION_TRACER + depends on DYNAMIC_FTRACE_WITH_REGS + default n + help + This option enables kernel function probe (fprobe) based on ftrace, + which is similar to kprobes, but probes only for kernel function + entries and it can probe multiple functions with one fprobe. + + If unsure, say N. + config FUNCTION_PROFILER bool "Kernel function profiler" depends on FUNCTION_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index bedc5caceec7..79255f9de9a4 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -97,6 +97,7 @@ obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o +obj-$(CONFIG_FPROBE) += fprobe.o obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c new file mode 100644 index 000000000000..7e8ceee339a0 --- /dev/null +++ b/kernel/trace/fprobe.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * fprobe - Simple ftrace probe wrapper for function entry. + */ +#define pr_fmt(fmt) "fprobe: " fmt + +#include +#include +#include +#include +#include +#include + +static void fprobe_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct fprobe *fp; + int bit; + + fp = container_of(ops, struct fprobe, ops); + if (fprobe_disabled(fp)) + return; + + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) { + fp->nmissed++; + return; + } + + if (fp->entry_handler) + fp->entry_handler(fp, ip, ftrace_get_regs(fregs)); + + ftrace_test_recursion_unlock(bit); +} +NOKPROBE_SYMBOL(fprobe_handler); + +/* Convert ftrace location address from symbols */ +static unsigned long *get_ftrace_locations(const char **syms, int num) +{ + unsigned long addr, size; + unsigned long *addrs; + int i; + + /* Convert symbols to symbol address */ + addrs = kcalloc(num, sizeof(*addrs), GFP_KERNEL); + if (!addrs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < num; i++) { + addr = kallsyms_lookup_name(syms[i]); + if (!addr) /* Maybe wrong symbol */ + goto error; + + /* Convert symbol address to ftrace location. */ + if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size) + goto error; + + addr = ftrace_location_range(addr, addr + size - 1); + if (!addr) /* No dynamic ftrace there. */ + goto error; + + addrs[i] = addr; + } + + return addrs; + +error: + kfree(addrs); + + return ERR_PTR(-ENOENT); +} + +static void fprobe_init(struct fprobe *fp) +{ + fp->nmissed = 0; + fp->ops.func = fprobe_handler; + fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS; +} + +/** + * register_fprobe() - Register fprobe to ftrace by pattern. + * @fp: A fprobe data structure to be registered. + * @filter: A wildcard pattern of probed symbols. + * @notfilter: A wildcard pattern of NOT probed symbols. + * + * Register @fp to ftrace for enabling the probe on the symbols matching @filter. + * If @notfilter is not NULL, the symbols matching @notfilter are not probed.
+ * + * Return 0 if @fp is registered successfully, -errno if not. + */ +int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) +{ + unsigned char *str; + int ret, len; + + if (!fp || !filter) + return -EINVAL; + + fprobe_init(fp); + + len = strlen(filter); + str = kstrdup(filter, GFP_KERNEL); + ret = ftrace_set_filter(&fp->ops, str, len, 0); + kfree(str); + if (ret) + return ret; + + if (notfilter) { + len = strlen(notfilter); + str = kstrdup(notfilter, GFP_KERNEL); + ret = ftrace_set_notrace(&fp->ops, str, len, 0); + kfree(str); + if (ret) + goto out; + } + + ret = register_ftrace_function(&fp->ops); +out: + if (ret) + ftrace_free_filter(&fp->ops); + return ret; +} +EXPORT_SYMBOL_GPL(register_fprobe); + +/** + * register_fprobe_ips() - Register fprobe to ftrace by address. + * @fp: A fprobe data structure to be registered. + * @addrs: An array of target ftrace location addresses. + * @num: The number of entries of @addrs. + * + * Register @fp to ftrace for enabling the probe on the address given by @addrs. + * The @addrs must be the addresses of ftrace location address, which may be + * the symbol address + arch-dependent offset. + * If you are unsure what this means, please use the other registration functions. + * + * Return 0 if @fp is registered successfully, -errno if not. + */ +int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num) +{ + int ret; + + if (!fp || !addrs || num <= 0) + return -EINVAL; + + fprobe_init(fp); + + ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0); + if (!ret) + ret = register_ftrace_function(&fp->ops); + + if (ret) + ftrace_free_filter(&fp->ops); + + return ret; +} +EXPORT_SYMBOL_GPL(register_fprobe_ips); + +/** + * register_fprobe_syms() - Register fprobe to ftrace by symbols. + * @fp: A fprobe data structure to be registered. + * @syms: An array of target symbols. + * @num: The number of entries of @syms. + * + * Register @fp on the symbols given by the @syms array. This will be useful if + * you are sure the symbols exist in the kernel. + * + * Return 0 if @fp is registered successfully, -errno if not. + */ +int register_fprobe_syms(struct fprobe *fp, const char **syms, int num) +{ + unsigned long *addrs; + int ret; + + if (!fp || !syms || num <= 0) + return -EINVAL; + + addrs = get_ftrace_locations(syms, num); + if (IS_ERR(addrs)) + return PTR_ERR(addrs); + + ret = register_fprobe_ips(fp, addrs, num); + + kfree(addrs); + + return ret; +} +EXPORT_SYMBOL_GPL(register_fprobe_syms); + +/** + * unregister_fprobe() - Unregister fprobe from ftrace + * @fp: A fprobe data structure to be unregistered. + * + * Unregister fprobe (and remove ftrace hooks from the function entries). + * + * Return 0 if @fp is unregistered successfully, -errno if not. + */ +int unregister_fprobe(struct fprobe *fp) +{ + int ret; + + if (!fp || fp->ops.func != fprobe_handler) + return -EINVAL; + + ret = unregister_ftrace_function(&fp->ops); + + if (!ret) + ftrace_free_filter(&fp->ops); + + return ret; +} +EXPORT_SYMBOL_GPL(unregister_fprobe); -- cgit v1.2.3-71-gd317 From 54ecbe6f1ed5138c895bdff55608cf502755b20e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:00:50 +0900 Subject: rethook: Add a generic return hook Add a return hook framework which hooks the function return. Most of the logic came from the kretprobe, but this is independent of kretprobe. Note that this is expected to be used with other function-entry hooking features, like ftrace, fprobe, and kprobes. Eventually this will replace the kretprobe (e.g.
kprobe + rethook = kretprobe), but at this moment, this is just an additional hook. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735285066.1084943.9259661137330166643.stgit@devnote2 --- include/linux/rethook.h | 100 +++++++++++++++ include/linux/sched.h | 3 + kernel/exit.c | 2 + kernel/fork.c | 3 + kernel/trace/Kconfig | 11 ++ kernel/trace/Makefile | 1 + kernel/trace/rethook.c | 317 ++++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 437 insertions(+) create mode 100644 include/linux/rethook.h create mode 100644 kernel/trace/rethook.c (limited to 'include/linux') diff --git a/include/linux/rethook.h b/include/linux/rethook.h new file mode 100644 index 000000000000..c8ac1e5afcd1 --- /dev/null +++ b/include/linux/rethook.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Return hooking with list-based shadow stack. + */ +#ifndef _LINUX_RETHOOK_H +#define _LINUX_RETHOOK_H + +#include +#include +#include +#include +#include +#include + +struct rethook_node; + +typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs *); + +/** + * struct rethook - The rethook management data structure. + * @data: The user-defined data storage. + * @handler: The user-defined return hook handler. + * @pool: The pool of struct rethook_node. + * @ref: The reference counter. + * @rcu: The rcu_head for deferred freeing. + * + * Don't embed to another data structure, because this is a self-destructive + * data structure when all rethook_node are freed. + */ +struct rethook { + void *data; + rethook_handler_t handler; + struct freelist_head pool; + refcount_t ref; + struct rcu_head rcu; +}; + +/** + * struct rethook_node - The rethook shadow-stack entry node. + * @freelist: The freelist, linked to struct rethook::pool. + * @rcu: The rcu_head for deferred freeing. + * @llist: The llist, linked to a struct task_struct::rethooks. + * @rethook: The pointer to the struct rethook. + * @ret_addr: The storage for the real return address. + * @frame: The storage for the frame pointer. + * + * You can embed this to your extended data structure to store any data + * on each entry of the shadow stack. + */ +struct rethook_node { + union { + struct freelist_node freelist; + struct rcu_head rcu; + }; + struct llist_node llist; + struct rethook *rethook; + unsigned long ret_addr; + unsigned long frame; +}; + +struct rethook *rethook_alloc(void *data, rethook_handler_t handler); +void rethook_free(struct rethook *rh); +void rethook_add_node(struct rethook *rh, struct rethook_node *node); +struct rethook_node *rethook_try_get(struct rethook *rh); +void rethook_recycle(struct rethook_node *node); +void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount); +unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame, + struct llist_node **cur); + +/* Arch dependent code must implement arch_* and trampoline code */ +void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs, bool mcount); +void arch_rethook_trampoline(void); + +/** + * is_rethook_trampoline() - Check whether the address is rethook trampoline + * @addr: The address to be checked + * + * Return true if the @addr is the rethook trampoline address. 
+ */ +static inline bool is_rethook_trampoline(unsigned long addr) +{ + return addr == (unsigned long)dereference_symbol_descriptor(arch_rethook_trampoline); +} + +/* If the architecture needs to fixup the return address, implement it. */ +void arch_rethook_fixup_return(struct pt_regs *regs, + unsigned long correct_ret_addr); + +/* Generic trampoline handler, arch code must prepare asm stub */ +unsigned long rethook_trampoline_handler(struct pt_regs *regs, + unsigned long frame); + +#ifdef CONFIG_RETHOOK +void rethook_flush_task(struct task_struct *tk); +#else +#define rethook_flush_task(tsk) do { } while (0) +#endif + +#endif + diff --git a/include/linux/sched.h b/include/linux/sched.h index 75ba8aa60248..7034f53404e3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1481,6 +1481,9 @@ struct task_struct { #ifdef CONFIG_KRETPROBES struct llist_head kretprobe_instances; #endif +#ifdef CONFIG_RETHOOK + struct llist_head rethooks; +#endif #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH /* diff --git a/kernel/exit.c b/kernel/exit.c index b00a25bb4ab9..2d1803fa8fe6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -64,6 +64,7 @@ #include #include #include +#include #include #include @@ -169,6 +170,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp) struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); kprobe_flush_task(tsk); + rethook_flush_task(tsk); perf_event_delayed_put(tsk); trace_sched_process_free(tsk); put_task_struct(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index a024bf6254df..3db1a4110a25 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2255,6 +2255,9 @@ static __latent_entropy struct task_struct *copy_process( #ifdef CONFIG_KRETPROBES p->kretprobe_instances.first = NULL; #endif +#ifdef CONFIG_RETHOOK + p->rethooks.first = NULL; +#endif /* * Ensure that the cgroup subsystem policies allow the new process to be diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 7ce31abc542b..e75504e42ab8 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -10,6 +10,17 @@ config USER_STACKTRACE_SUPPORT config NOP_TRACER bool +config HAVE_RETHOOK + bool + +config RETHOOK + bool + depends on HAVE_RETHOOK + help + Enable generic return hooking feature. This is an internal + API, which will be used by other function-entry hooking + features like fprobe and kprobes. + config HAVE_FUNCTION_TRACER bool help diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 79255f9de9a4..c6f11a139eac 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -98,6 +98,7 @@ obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o obj-$(CONFIG_FPROBE) += fprobe.o +obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c new file mode 100644 index 000000000000..ab463a4d2b23 --- /dev/null +++ b/kernel/trace/rethook.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "rethook: " fmt + +#include +#include +#include +#include +#include +#include +#include + +/* Return hook list (shadow stack by list) */ + +/* + * This function is called from delayed_put_task_struct() when a task is + * dead and cleaned up to recycle any kretprobe instances associated with + * this task. These left over instances represent probed functions that + * have been called but will never return. 
+ */ +void rethook_flush_task(struct task_struct *tk) +{ + struct rethook_node *rhn; + struct llist_node *node; + + node = __llist_del_all(&tk->rethooks); + while (node) { + rhn = container_of(node, struct rethook_node, llist); + node = node->next; + preempt_disable(); + rethook_recycle(rhn); + preempt_enable(); + } +} + +static void rethook_free_rcu(struct rcu_head *head) +{ + struct rethook *rh = container_of(head, struct rethook, rcu); + struct rethook_node *rhn; + struct freelist_node *node; + int count = 1; + + node = rh->pool.head; + while (node) { + rhn = container_of(node, struct rethook_node, freelist); + node = node->next; + kfree(rhn); + count++; + } + + /* The rh->ref is the number of pooled node + 1 */ + if (refcount_sub_and_test(count, &rh->ref)) + kfree(rh); +} + +/** + * rethook_free() - Free struct rethook. + * @rh: the struct rethook to be freed. + * + * Free the rethook. Before calling this function, user must ensure the + * @rh::data is cleaned if needed (or, the handler can access it after + * calling this function.) This function will set the @rh to be freed + * after all rethook_node are freed (not soon). And the caller must + * not touch @rh after calling this. + */ +void rethook_free(struct rethook *rh) +{ + rcu_assign_pointer(rh->handler, NULL); + + call_rcu(&rh->rcu, rethook_free_rcu); +} + +/** + * rethook_alloc() - Allocate struct rethook. + * @data: a data to pass the @handler when hooking the return. + * @handler: the return hook callback function. + * + * Allocate and initialize a new rethook with @data and @handler. + * Return NULL if memory allocation fails or @handler is NULL. + * Note that @handler == NULL means this rethook is going to be freed. + */ +struct rethook *rethook_alloc(void *data, rethook_handler_t handler) +{ + struct rethook *rh = kzalloc(sizeof(struct rethook), GFP_KERNEL); + + if (!rh || !handler) + return NULL; + + rh->data = data; + rh->handler = handler; + rh->pool.head = NULL; + refcount_set(&rh->ref, 1); + + return rh; +} + +/** + * rethook_add_node() - Add a new node to the rethook. + * @rh: the struct rethook. + * @node: the struct rethook_node to be added. + * + * Add @node to @rh. User must allocate @node (as a part of user's + * data structure.) The @node fields are initialized in this function. + */ +void rethook_add_node(struct rethook *rh, struct rethook_node *node) +{ + node->rethook = rh; + freelist_add(&node->freelist, &rh->pool); + refcount_inc(&rh->ref); +} + +static void free_rethook_node_rcu(struct rcu_head *head) +{ + struct rethook_node *node = container_of(head, struct rethook_node, rcu); + + if (refcount_dec_and_test(&node->rethook->ref)) + kfree(node->rethook); + kfree(node); +} + +/** + * rethook_recycle() - return the node to rethook. + * @node: The struct rethook_node to be returned. + * + * Return back the @node to @node::rethook. If the @node::rethook is already + * marked as freed, this will free the @node. + */ +void rethook_recycle(struct rethook_node *node) +{ + lockdep_assert_preemption_disabled(); + + if (likely(READ_ONCE(node->rethook->handler))) + freelist_add(&node->freelist, &node->rethook->pool); + else + call_rcu(&node->rcu, free_rethook_node_rcu); +} +NOKPROBE_SYMBOL(rethook_recycle); + +/** + * rethook_try_get() - get an unused rethook node. + * @rh: The struct rethook which pools the nodes. + * + * Get an unused rethook node from @rh. If the node pool is empty, this + * will return NULL. Caller must disable preemption. 
+ */ +struct rethook_node *rethook_try_get(struct rethook *rh) +{ + rethook_handler_t handler = READ_ONCE(rh->handler); + struct freelist_node *fn; + + lockdep_assert_preemption_disabled(); + + /* Check whether @rh is going to be freed. */ + if (unlikely(!handler)) + return NULL; + + fn = freelist_try_get(&rh->pool); + if (!fn) + return NULL; + + return container_of(fn, struct rethook_node, freelist); +} +NOKPROBE_SYMBOL(rethook_try_get); + +/** + * rethook_hook() - Hook the current function return. + * @node: The struct rethook node to hook the function return. + * @regs: The struct pt_regs for the function entry. + * @mcount: True if this is called from mcount(ftrace) context. + * + * Hook the current running function return. This must be called when the + * function entry (or at least @regs must be the registers of the function + * entry.) @mcount is used for identifying the context. If this is called + * from ftrace (mcount) callback, @mcount must be set true. If this is called + * from the real function entry (e.g. kprobes) @mcount must be set false. + * This is because the way to hook the function return depends on the context. + */ +void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount) +{ + arch_rethook_prepare(node, regs, mcount); + __llist_add(&node->llist, &current->rethooks); +} +NOKPROBE_SYMBOL(rethook_hook); + +/* This assumes the 'tsk' is the current task or is not running. */ +static unsigned long __rethook_find_ret_addr(struct task_struct *tsk, + struct llist_node **cur) +{ + struct rethook_node *rh = NULL; + struct llist_node *node = *cur; + + if (!node) + node = tsk->rethooks.first; + else + node = node->next; + + while (node) { + rh = container_of(node, struct rethook_node, llist); + if (rh->ret_addr != (unsigned long)arch_rethook_trampoline) { + *cur = node; + return rh->ret_addr; + } + node = node->next; + } + return 0; +} +NOKPROBE_SYMBOL(__rethook_find_ret_addr); + +/** + * rethook_find_ret_addr -- Find correct return address modified by rethook + * @tsk: Target task + * @frame: A frame pointer + * @cur: a storage of the loop cursor llist_node pointer for next call + * + * Find the correct return address modified by a rethook on @tsk in unsigned + * long type. + * The @tsk must be 'current' or a task which is not running. @frame is a hint + * to get the correct return address - which is compared with the + * rethook::frame field. The @cur is a loop cursor for searching the + * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the + * first call, but '@cur' itself must not be NULL. + * + * Returns found address value or zero if not found. + */ +unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame, + struct llist_node **cur) +{ + struct rethook_node *rhn = NULL; + unsigned long ret; + + if (WARN_ON_ONCE(!cur)) + return 0; + + if (WARN_ON_ONCE(tsk != current && task_is_running(tsk))) + return 0; + + do { + ret = __rethook_find_ret_addr(tsk, cur); + if (!ret) + break; + rhn = container_of(*cur, struct rethook_node, llist); + } while (rhn->frame != frame); + + return ret; +} +NOKPROBE_SYMBOL(rethook_find_ret_addr); + +void __weak arch_rethook_fixup_return(struct pt_regs *regs, + unsigned long correct_ret_addr) +{ + /* + * Do nothing by default. If the architecture uses a frame + * pointer to record the real return address on the stack, + * it should fill this function to fix up the return address + * so that stacktrace works from the rethook handler.
+ */ +} + +/* This function will be called from each arch-defined trampoline. */ +unsigned long rethook_trampoline_handler(struct pt_regs *regs, + unsigned long frame) +{ + struct llist_node *first, *node = NULL; + unsigned long correct_ret_addr; + rethook_handler_t handler; + struct rethook_node *rhn; + + correct_ret_addr = __rethook_find_ret_addr(current, &node); + if (!correct_ret_addr) { + pr_err("rethook: Return address not found! Maybe there is a bug in the kernel\n"); + BUG_ON(1); + } + + instruction_pointer_set(regs, correct_ret_addr); + + /* + * These loops must be protected from rethook_free_rcu() because those + * are accessing 'rhn->rethook'. + */ + preempt_disable(); + + /* + * Run the handler on the shadow stack. Do not unlink the list here because + * stackdump inside the handlers needs to decode it. + */ + first = current->rethooks.first; + while (first) { + rhn = container_of(first, struct rethook_node, llist); + if (WARN_ON_ONCE(rhn->frame != frame)) + break; + handler = READ_ONCE(rhn->rethook->handler); + if (handler) + handler(rhn, rhn->rethook->data, regs); + + if (first == node) + break; + first = first->next; + } + + /* Fixup registers for returning to correct address. */ + arch_rethook_fixup_return(regs, correct_ret_addr); + + /* Unlink used shadow stack */ + first = current->rethooks.first; + current->rethooks.first = node->next; + node->next = NULL; + + while (first) { + rhn = container_of(first, struct rethook_node, llist); + first = first->next; + rethook_recycle(rhn); + } + preempt_enable(); + + return correct_ret_addr; +} +NOKPROBE_SYMBOL(rethook_trampoline_handler); -- cgit v1.2.3-71-gd317 From 5b0ab78998e32564a011b14c4c7f9c81e2d42b9d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:01:48 +0900 Subject: fprobe: Add exit_handler support Add exit_handler to fprobe. fprobe + rethook allows us to hook the kernel function return. The rethook will be enabled only if the fprobe::exit_handler is set. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735290790.1084943.10601965782208052202.stgit@devnote2 --- include/linux/fprobe.h | 6 +++ kernel/trace/Kconfig | 9 ++-- kernel/trace/fprobe.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 122 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h index 2ba099aff041..8eefec2b485e 100644 --- a/include/linux/fprobe.h +++ b/include/linux/fprobe.h @@ -5,13 +5,16 @@ #include #include +#include /** * struct fprobe - ftrace based probe. * @ops: The ftrace_ops. * @nmissed: The counter for missing events. * @flags: The status flag. + * @rethook: The rethook data structure. (internal data) * @entry_handler: The callback function for function entry. + * @exit_handler: The callback function for function exit. 
*/ struct fprobe { #ifdef CONFIG_FUNCTION_TRACER @@ -25,7 +28,10 @@ struct fprobe { #endif unsigned long nmissed; unsigned int flags; + struct rethook *rethook; + void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); + void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); }; #define FPROBE_FL_DISABLED 1 diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index e75504e42ab8..99dd4ca63d68 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -251,11 +251,14 @@ config FPROBE bool "Kernel Function Probe (fprobe)" depends on FUNCTION_TRACER depends on DYNAMIC_FTRACE_WITH_REGS + depends on HAVE_RETHOOK + select RETHOOK default n help - This option enables kernel function probe (fprobe) based on ftrace, - which is similar to kprobes, but probes only for kernel function - entries and it can probe multiple functions by one fprobe. + This option enables kernel function probe (fprobe) based on ftrace. + The fprobe is similar to kprobes, but probes only for kernel function + entries and exits. This also can probe multiple functions by one + fprobe. If unsure, say N. diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 7e8ceee339a0..38073632bfe4 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -8,12 +8,22 @@ #include #include #include +#include #include #include +#include "trace.h" + +struct fprobe_rethook_node { + struct rethook_node node; + unsigned long entry_ip; +}; + static void fprobe_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs) { + struct fprobe_rethook_node *fpr; + struct rethook_node *rh; struct fprobe *fp; int bit; @@ -30,10 +40,37 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip, if (fp->entry_handler) fp->entry_handler(fp, ip, ftrace_get_regs(fregs)); + if (fp->exit_handler) { + rh = rethook_try_get(fp->rethook); + if (!rh) { + fp->nmissed++; + goto out; + } + fpr = container_of(rh, struct fprobe_rethook_node, node); + fpr->entry_ip = ip; + rethook_hook(rh, ftrace_get_regs(fregs), true); + } + +out: ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(fprobe_handler); +static void fprobe_exit_handler(struct rethook_node *rh, void *data, + struct pt_regs *regs) +{ + struct fprobe *fp = (struct fprobe *)data; + struct fprobe_rethook_node *fpr; + + if (!fp || fprobe_disabled(fp)) + return; + + fpr = container_of(rh, struct fprobe_rethook_node, node); + + fp->exit_handler(fp, fpr->entry_ip, regs); +} +NOKPROBE_SYMBOL(fprobe_exit_handler); + /* Convert ftrace location address from symbols */ static unsigned long *get_ftrace_locations(const char **syms, int num) { @@ -77,6 +114,48 @@ static void fprobe_init(struct fprobe *fp) fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS; } +static int fprobe_init_rethook(struct fprobe *fp, int num) +{ + int i, size; + + if (num < 0) + return -EINVAL; + + if (!fp->exit_handler) { + fp->rethook = NULL; + return 0; + } + + /* Initialize rethook if needed */ + size = num * num_possible_cpus() * 2; + if (size < 0) + return -E2BIG; + + fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler); + for (i = 0; i < size; i++) { + struct rethook_node *node; + + node = kzalloc(sizeof(struct fprobe_rethook_node), GFP_KERNEL); + if (!node) { + rethook_free(fp->rethook); + fp->rethook = NULL; + return -ENOMEM; + } + rethook_add_node(fp->rethook, node); + } + return 0; +} + +static void fprobe_fail_cleanup(struct fprobe *fp) +{ + if (fp->rethook) { + /* Don't need to cleanup rethook->handler 
because this is not used. */ + rethook_free(fp->rethook); + fp->rethook = NULL; + } + ftrace_free_filter(&fp->ops); +} + /** * register_fprobe() - Register fprobe to ftrace by pattern. * @fp: A fprobe data structure to be registered. @@ -90,6 +169,7 @@ static void fprobe_init(struct fprobe *fp) */ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) { + struct ftrace_hash *hash; unsigned char *str; int ret, len; @@ -114,10 +194,21 @@ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter goto out; } - ret = register_ftrace_function(&fp->ops); + /* TODO: + * correctly calculate the total number of filtered symbols + * from both filter and notfilter. + */ + hash = fp->ops.local_hash.filter_hash; + if (WARN_ON_ONCE(!hash)) + goto out; + + ret = fprobe_init_rethook(fp, (int)hash->count); + if (!ret) + ret = register_ftrace_function(&fp->ops); + out: if (ret) - ftrace_free_filter(&fp->ops); + fprobe_fail_cleanup(fp); return ret; } EXPORT_SYMBOL_GPL(register_fprobe); @@ -145,12 +236,15 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num) fprobe_init(fp); ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0); + if (ret) + return ret; + + ret = fprobe_init_rethook(fp, num); if (!ret) ret = register_ftrace_function(&fp->ops); if (ret) - ftrace_free_filter(&fp->ops); - + fprobe_fail_cleanup(fp); return ret; } EXPORT_SYMBOL_GPL(register_fprobe_ips); @@ -201,10 +295,20 @@ int unregister_fprobe(struct fprobe *fp) if (!fp || fp->ops.func != fprobe_handler) return -EINVAL; + /* + * rethook_free() starts disabling the rethook, but the rethook handlers + * may be running on other processors at this point. To make sure that all + * current running handlers are finished, call unregister_ftrace_function() + * after this. + */ + if (fp->rethook) + rethook_free(fp->rethook); + ret = unregister_ftrace_function(&fp->ops); + if (ret < 0) + return ret; - if (!ret) - ftrace_free_filter(&fp->ops); + ftrace_free_filter(&fp->ops); return ret; } -- cgit v1.2.3-71-gd317 From ab51e15d535e07be9839e0df056a4ebe9c5bac83 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:02:11 +0900 Subject: fprobe: Introduce FPROBE_FL_KPROBE_SHARED flag for fprobe Introduce FPROBE_FL_KPROBE_SHARED flag for sharing fprobe callback with kprobes safely from the viewpoint of recursion. Since the recursion safety of the fprobe (and ftrace) is a bit different from the kprobes, this may cause an issue if user wants to run the same code from the fprobe and the kprobes. The kprobes has per-cpu 'current_kprobe' variable which protects the kprobe handler from recursion in any case. On the other hand, the fprobe uses only ftrace_test_recursion_trylock(), which will allow interrupt context calls another (or same) fprobe during the fprobe user handler is running. This is not a matter in cases if the common callback shared among the kprobes and the fprobe has its own recursion detection, or it can handle the recursion in the different contexts (normal/interrupt/NMI.) But if it relies on the 'current_kprobe' recursion lock, it has to check kprobe_running() and use kprobe_busy_*() APIs. Fprobe has FPROBE_FL_KPROBE_SHARED flag to do this. If your common callback code will be shared with kprobes, please set FPROBE_FL_KPROBE_SHARED *before* registering the fprobe, like; fprobe.flags = FPROBE_FL_KPROBE_SHARED; register_fprobe(&fprobe, "func*", NULL); This will protect your common callback from the nested call. 
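As a rough usage sketch (not part of this patch; the handler and probe names below are made up), a callback body that relies on the 'current_kprobe' recursion lock could be shared like this:

	/* Common body, also called from a kprobe pre-handler elsewhere. */
	static void shared_body(unsigned long ip, struct pt_regs *regs)
	{
		/*
		 * Safe here: because of FPROBE_FL_KPROBE_SHARED, the fprobe
		 * core wraps this call with kprobe_busy_begin()/kprobe_busy_end().
		 */
	}

	static void my_entry_handler(struct fprobe *fp, unsigned long entry_ip,
				     struct pt_regs *regs)
	{
		shared_body(entry_ip, regs);
	}

	static struct fprobe my_fprobe = {
		.entry_handler	= my_entry_handler,
		.flags		= FPROBE_FL_KPROBE_SHARED, /* set before registering */
	};

	...
	err = register_fprobe(&my_fprobe, "func*", NULL);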
Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735293127.1084943.15687374237275817599.stgit@devnote2 --- include/linux/fprobe.h | 12 ++++++++++++ include/linux/kprobes.h | 3 +++ kernel/trace/fprobe.c | 19 ++++++++++++++++++- 3 files changed, 33 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h index 8eefec2b485e..1c2bde0ead73 100644 --- a/include/linux/fprobe.h +++ b/include/linux/fprobe.h @@ -34,13 +34,25 @@ struct fprobe { void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); }; +/* This fprobe is soft-disabled. */ #define FPROBE_FL_DISABLED 1 +/* + * This fprobe handler will be shared with kprobes. + * This flag must be set before registering. + */ +#define FPROBE_FL_KPROBE_SHARED 2 + static inline bool fprobe_disabled(struct fprobe *fp) { return (fp) ? fp->flags & FPROBE_FL_DISABLED : false; } +static inline bool fprobe_shared_with_kprobes(struct fprobe *fp) +{ + return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false; +} + #ifdef CONFIG_FPROBE int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter); int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 19b884353b15..5f1859836deb 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -427,6 +427,9 @@ static inline struct kprobe *kprobe_running(void) { return NULL; } +#define kprobe_busy_begin() do {} while (0) +#define kprobe_busy_end() do {} while (0) + static inline int register_kprobe(struct kprobe *p) { return -EOPNOTSUPP; diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 38073632bfe4..8b2dd5b9dcd1 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -56,6 +56,20 @@ out: } NOKPROBE_SYMBOL(fprobe_handler); +static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct fprobe *fp = container_of(ops, struct fprobe, ops); + + if (unlikely(kprobe_running())) { + fp->nmissed++; + return; + } + kprobe_busy_begin(); + fprobe_handler(ip, parent_ip, ops, fregs); + kprobe_busy_end(); +} + static void fprobe_exit_handler(struct rethook_node *rh, void *data, struct pt_regs *regs) { @@ -110,7 +124,10 @@ error: static void fprobe_init(struct fprobe *fp) { fp->nmissed = 0; - fp->ops.func = fprobe_handler; + if (fprobe_shared_with_kprobes(fp)) + fp->ops.func = fprobe_kprobe_handler; + else + fp->ops.func = fprobe_handler; fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS; } -- cgit v1.2.3-71-gd317 From a0019cd7d41a191253859349535d76ee28ab7d96 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:07 +0100 Subject: lib/sort: Add priv pointer to swap function Adding support to have priv pointer in swap callback function. Following the initial change on cmp callback functions [1] and adding SWAP_WRAPPER macro to identify sort call of sort_r. 
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Reviewed-by: Masami Hiramatsu Link: https://lore.kernel.org/bpf/20220316122419.933957-2-jolsa@kernel.org [1] 4333fb96ca10 ("media: lib/sort.c: implement sort() variant taking context argument") --- include/linux/sort.h | 2 +- include/linux/types.h | 1 + lib/sort.c | 40 ++++++++++++++++++++++++++++++---------- 3 files changed, 32 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sort.h b/include/linux/sort.h index b5898725fe9d..e163287ac6c1 100644 --- a/include/linux/sort.h +++ b/include/linux/sort.h @@ -6,7 +6,7 @@ void sort_r(void *base, size_t num, size_t size, cmp_r_func_t cmp_func, - swap_func_t swap_func, + swap_r_func_t swap_func, const void *priv); void sort(void *base, size_t num, size_t size, diff --git a/include/linux/types.h b/include/linux/types.h index ac825ad90e44..ea8cf60a8a79 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -226,6 +226,7 @@ struct callback_head { typedef void (*rcu_callback_t)(struct rcu_head *head); typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); +typedef void (*swap_r_func_t)(void *a, void *b, int size, const void *priv); typedef void (*swap_func_t)(void *a, void *b, int size); typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv); diff --git a/lib/sort.c b/lib/sort.c index aa18153864d2..b399bf10d675 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -122,16 +122,27 @@ static void swap_bytes(void *a, void *b, size_t n) * a pointer, but small integers make for the smallest compare * instructions. */ -#define SWAP_WORDS_64 (swap_func_t)0 -#define SWAP_WORDS_32 (swap_func_t)1 -#define SWAP_BYTES (swap_func_t)2 +#define SWAP_WORDS_64 (swap_r_func_t)0 +#define SWAP_WORDS_32 (swap_r_func_t)1 +#define SWAP_BYTES (swap_r_func_t)2 +#define SWAP_WRAPPER (swap_r_func_t)3 + +struct wrapper { + cmp_func_t cmp; + swap_func_t swap; +}; /* * The function pointer is last to make tail calls most efficient if the * compiler decides not to inline this function. 
*/ -static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func) +static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv) { + if (swap_func == SWAP_WRAPPER) { + ((const struct wrapper *)priv)->swap(a, b, (int)size); + return; + } + if (swap_func == SWAP_WORDS_64) swap_words_64(a, b, size); else if (swap_func == SWAP_WORDS_32) @@ -139,7 +150,7 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func) else if (swap_func == SWAP_BYTES) swap_bytes(a, b, size); else - swap_func(a, b, (int)size); + swap_func(a, b, (int)size, priv); } #define _CMP_WRAPPER ((cmp_r_func_t)0L) @@ -147,7 +158,7 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func) static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv) { if (cmp == _CMP_WRAPPER) - return ((cmp_func_t)(priv))(a, b); + return ((const struct wrapper *)priv)->cmp(a, b); return cmp(a, b, priv); } @@ -198,7 +209,7 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size) */ void sort_r(void *base, size_t num, size_t size, cmp_r_func_t cmp_func, - swap_func_t swap_func, + swap_r_func_t swap_func, const void *priv) { /* pre-scale counters for performance */ @@ -208,6 +219,10 @@ void sort_r(void *base, size_t num, size_t size, if (!a) /* num < 2 || size == 0 */ return; + /* called from 'sort' without swap function, let's pick the default */ + if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap) + swap_func = NULL; + if (!swap_func) { if (is_aligned(base, size, 8)) swap_func = SWAP_WORDS_64; @@ -230,7 +245,7 @@ void sort_r(void *base, size_t num, size_t size, if (a) /* Building heap: sift down --a */ a -= size; else if (n -= size) /* Sorting: Extract root to --n */ - do_swap(base, base + n, size, swap_func); + do_swap(base, base + n, size, swap_func, priv); else /* Sort complete */ break; @@ -257,7 +272,7 @@ void sort_r(void *base, size_t num, size_t size, c = b; /* Where "a" belongs */ while (b != a) { /* Shift it into place */ b = parent(b, lsbit, size); - do_swap(base + b, base + c, size, swap_func); + do_swap(base + b, base + c, size, swap_func, priv); } } } @@ -267,6 +282,11 @@ void sort(void *base, size_t num, size_t size, cmp_func_t cmp_func, swap_func_t swap_func) { - return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func); + struct wrapper w = { + .cmp = cmp_func, + .swap = swap_func, + }; + + return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w); } EXPORT_SYMBOL(sort); -- cgit v1.2.3-71-gd317 From 0dcac272540613d41c05e89679e4ddb978b612f1 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:09 +0100 Subject: bpf: Add multi kprobe link Adding new link type BPF_LINK_TYPE_KPROBE_MULTI that attaches a kprobe program through the fprobe API. The fprobe API allows attaching a probe to multiple functions at once very quickly, because it works on top of ftrace. On the other hand, this limits the probe point to the function entry or return. The kprobe program gets the same pt_regs input ctx as when it's attached through the perf API. Adding new attach type BPF_TRACE_KPROBE_MULTI that allows attaching the kprobe program to multiple functions with the new link. The user provides an array of addresses or symbols, with a count, to attach the kprobe program to. The new link_create uapi interface looks like: struct { __u32 flags; __u32 cnt; __aligned_u64 syms; __aligned_u64 addrs; } kprobe_multi; The flags field allows a single bit, BPF_F_KPROBE_MULTI_RETURN, to be set to create a return (exit) multi kprobe.
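A hedged userspace sketch of driving this interface with a raw bpf(2) call (prog_fd is assumed to be a loaded BPF_PROG_TYPE_KPROBE program with expected_attach_type BPF_TRACE_KPROBE_MULTI; needs <linux/bpf.h>, <sys/syscall.h> and <unistd.h>):

	const char *syms[] = { "ksys_read", "ksys_write" };
	union bpf_attr attr = {};
	int link_fd;

	attr.link_create.prog_fd = prog_fd;
	attr.link_create.attach_type = BPF_TRACE_KPROBE_MULTI;
	attr.link_create.kprobe_multi.syms = (__u64)(unsigned long)syms;
	attr.link_create.kprobe_multi.cnt = 2;
	/* or set .kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN for exit */

	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));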
Signed-off-by: Masami Hiramatsu Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220316122419.933957-4-jolsa@kernel.org --- include/linux/bpf_types.h | 1 + include/linux/trace_events.h | 7 ++ include/uapi/linux/bpf.h | 13 +++ kernel/bpf/syscall.c | 26 ++++- kernel/trace/bpf_trace.c | 211 +++++++++++++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 13 +++ 6 files changed, 266 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 48a91c51c015..3e24ad0c4b3c 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -140,3 +140,4 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) #ifdef CONFIG_PERF_EVENTS BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif +BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index dcea51fb60e2..8f0e9e7cb493 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -15,6 +15,7 @@ struct array_buffer; struct tracer; struct dentry; struct bpf_prog; +union bpf_attr; const char *trace_print_flags_seq(struct trace_seq *p, const char *delim, unsigned long flags, @@ -738,6 +739,7 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -779,6 +781,11 @@ static inline int bpf_get_perf_event_info(const struct perf_event *event, { return -EOPNOTSUPP; } +static inline int +bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif enum { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 99fab54ae9c0..d77f47af7752 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -997,6 +997,7 @@ enum bpf_attach_type { BPF_SK_REUSEPORT_SELECT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, + BPF_TRACE_KPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1011,6 +1012,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, MAX_BPF_LINK_TYPE, }; @@ -1118,6 +1120,11 @@ enum bpf_link_type { */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) +/* link_create.kprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_KPROBE_MULTI attach type to create return probe. 
+ */ +#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -1475,6 +1482,12 @@ union bpf_attr { */ __u64 bpf_cookie; } perf_event; + struct { + __u32 flags; + __u32 cnt; + __aligned_u64 syms; + __aligned_u64 addrs; + } kprobe_multi; }; } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 9beb585be5a6..b8bb67ee6c57 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -32,6 +32,7 @@ #include #include #include +#include #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ @@ -3022,6 +3023,11 @@ out_put_file: fput(perf_file); return err; } +#else +static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_PERF_EVENTS */ #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd @@ -4255,7 +4261,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr, return -EINVAL; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len +#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.addrs static int link_create(union bpf_attr *attr, bpfptr_t uattr) { enum bpf_prog_type ptype; @@ -4279,7 +4285,6 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) ret = tracing_bpf_link_attach(attr, uattr, prog); goto out; case BPF_PROG_TYPE_PERF_EVENT: - case BPF_PROG_TYPE_KPROBE: case BPF_PROG_TYPE_TRACEPOINT: if (attr->link_create.attach_type != BPF_PERF_EVENT) { ret = -EINVAL; @@ -4287,6 +4292,14 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) } ptype = prog->type; break; + case BPF_PROG_TYPE_KPROBE: + if (attr->link_create.attach_type != BPF_PERF_EVENT && + attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) { + ret = -EINVAL; + goto out; + } + ptype = prog->type; + break; default: ptype = attach_type_to_prog_type(attr->link_create.attach_type); if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) { @@ -4318,13 +4331,16 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) ret = bpf_xdp_link_attach(attr, prog); break; #endif -#ifdef CONFIG_PERF_EVENTS case BPF_PROG_TYPE_PERF_EVENT: case BPF_PROG_TYPE_TRACEPOINT: - case BPF_PROG_TYPE_KPROBE: ret = bpf_perf_link_attach(attr, prog); break; -#endif + case BPF_PROG_TYPE_KPROBE: + if (attr->link_create.attach_type == BPF_PERF_EVENT) + ret = bpf_perf_link_attach(attr, prog); + else + ret = bpf_kprobe_multi_link_attach(attr, prog); + break; default: ret = -EINVAL; } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index a2024ba32a20..fffa2171fae4 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -2181,3 +2182,213 @@ static int __init bpf_event_init(void) fs_initcall(bpf_event_init); #endif /* CONFIG_MODULES */ + +#ifdef CONFIG_FPROBE +struct bpf_kprobe_multi_link { + struct bpf_link link; + struct fprobe fp; + unsigned long *addrs; +}; + +static void bpf_kprobe_multi_link_release(struct bpf_link *link) +{ + struct bpf_kprobe_multi_link *kmulti_link; + + kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); + unregister_fprobe(&kmulti_link->fp); +} + +static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) +{ + struct bpf_kprobe_multi_link *kmulti_link; + + kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); + kvfree(kmulti_link->addrs); + kfree(kmulti_link); +} + +static 
const struct bpf_link_ops bpf_kprobe_multi_link_lops = { + .release = bpf_kprobe_multi_link_release, + .dealloc = bpf_kprobe_multi_link_dealloc, +}; + +static int +kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, + struct pt_regs *regs) +{ + int err; + + if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { + err = 0; + goto out; + } + + migrate_disable(); + rcu_read_lock(); + err = bpf_prog_run(link->link.prog, regs); + rcu_read_unlock(); + migrate_enable(); + + out: + __this_cpu_dec(bpf_prog_active); + return err; +} + +static void +kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip, + struct pt_regs *regs) +{ + unsigned long saved_ip = instruction_pointer(regs); + struct bpf_kprobe_multi_link *link; + + /* + * Because fprobe's regs->ip is set to the next instruction of + * dynamic-ftrace instruction, correct entry ip must be set, so + * that the bpf program can access entry address via regs as same + * as kprobes. + * + * Both kprobe and kretprobe see the entry ip of traced function + * as instruction pointer. + */ + instruction_pointer_set(regs, entry_ip); + + link = container_of(fp, struct bpf_kprobe_multi_link, fp); + kprobe_multi_link_prog_run(link, regs); + + instruction_pointer_set(regs, saved_ip); +} + +static int +kprobe_multi_resolve_syms(const void *usyms, u32 cnt, + unsigned long *addrs) +{ + unsigned long addr, size; + const char **syms; + int err = -ENOMEM; + unsigned int i; + char *func; + + size = cnt * sizeof(*syms); + syms = kvzalloc(size, GFP_KERNEL); + if (!syms) + return -ENOMEM; + + func = kmalloc(KSYM_NAME_LEN, GFP_KERNEL); + if (!func) + goto error; + + if (copy_from_user(syms, usyms, size)) { + err = -EFAULT; + goto error; + } + + for (i = 0; i < cnt; i++) { + err = strncpy_from_user(func, syms[i], KSYM_NAME_LEN); + if (err == KSYM_NAME_LEN) + err = -E2BIG; + if (err < 0) + goto error; + err = -EINVAL; + addr = kallsyms_lookup_name(func); + if (!addr) + goto error; + if (!kallsyms_lookup_size_offset(addr, &size, NULL)) + goto error; + addr = ftrace_location_range(addr, addr + size - 1); + if (!addr) + goto error; + addrs[i] = addr; + } + + err = 0; +error: + kvfree(syms); + kfree(func); + return err; +} + +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_kprobe_multi_link *link = NULL; + struct bpf_link_primer link_primer; + unsigned long *addrs; + u32 flags, cnt, size; + void __user *uaddrs; + void __user *usyms; + int err; + + /* no support for 32bit archs yet */ + if (sizeof(u64) != sizeof(void *)) + return -EOPNOTSUPP; + + if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI) + return -EINVAL; + + flags = attr->link_create.kprobe_multi.flags; + if (flags & ~BPF_F_KPROBE_MULTI_RETURN) + return -EINVAL; + + uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); + usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); + if (!!uaddrs == !!usyms) + return -EINVAL; + + cnt = attr->link_create.kprobe_multi.cnt; + if (!cnt) + return -EINVAL; + + size = cnt * sizeof(*addrs); + addrs = kvmalloc(size, GFP_KERNEL); + if (!addrs) + return -ENOMEM; + + if (uaddrs) { + if (copy_from_user(addrs, uaddrs, size)) { + err = -EFAULT; + goto error; + } + } else { + err = kprobe_multi_resolve_syms(usyms, cnt, addrs); + if (err) + goto error; + } + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + err = -ENOMEM; + goto error; + } + + bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, + &bpf_kprobe_multi_link_lops, prog); + + err = bpf_link_prime(&link->link, 
&link_primer); + if (err) + goto error; + + if (flags & BPF_F_KPROBE_MULTI_RETURN) + link->fp.exit_handler = kprobe_multi_link_handler; + else + link->fp.entry_handler = kprobe_multi_link_handler; + + link->addrs = addrs; + + err = register_fprobe_ips(&link->fp, addrs, cnt); + if (err) { + bpf_link_cleanup(&link_primer); + return err; + } + + return bpf_link_settle(&link_primer); + +error: + kfree(link); + kvfree(addrs); + return err; +} +#else /* !CONFIG_FPROBE */ +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +#endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 99fab54ae9c0..d77f47af7752 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -997,6 +997,7 @@ enum bpf_attach_type { BPF_SK_REUSEPORT_SELECT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, + BPF_TRACE_KPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1011,6 +1012,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, MAX_BPF_LINK_TYPE, }; @@ -1118,6 +1120,11 @@ enum bpf_link_type { */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) +/* link_create.kprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_KPROBE_MULTI attach type to create return probe. + */ +#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -1475,6 +1482,12 @@ union bpf_attr { */ __u64 bpf_cookie; } perf_event; + struct { + __u32 flags; + __u32 cnt; + __aligned_u64 syms; + __aligned_u64 addrs; + } kprobe_multi; }; } link_create; -- cgit v1.2.3-71-gd317 From 7a19006b60b129ce2cbe787f4f910dc0ec5a1ec6 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 18 Mar 2022 08:34:52 +0100 Subject: kernfs: remove unneeded #if 0 guard Commit f2eb478f2f32 ("kernfs: move struct kernfs_root out of the public view.") moved kernfs_root out of kernfs.h, but my debugging code of a #if 0 was left in accidentally. Fix that up by removing the guards. Fixes: f2eb478f2f32 ("kernfs: move struct kernfs_root out of the public view.") Cc: Tejun Heo Reported-by: Al Viro Link: https://lore.kernel.org/r/20220318073452.1486568-1-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- include/linux/kernfs.h | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 62aff082dc3f..e2ae15a6225e 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -185,26 +185,6 @@ struct kernfs_syscall_ops { struct kernfs_root *root); }; -#if 0 -struct kernfs_root { - /* published fields */ - struct kernfs_node *kn; - unsigned int flags; /* KERNFS_ROOT_* flags */ - - /* private fields, do not use outside kernfs proper */ - struct idr ino_idr; - u32 last_id_lowbits; - u32 id_highbits; - struct kernfs_syscall_ops *syscall_ops; - - /* list of kernfs_super_info of this root, protected by kernfs_rwsem */ - struct list_head supers; - - wait_queue_head_t deactivate_waitq; - struct rw_semaphore kernfs_rwsem; -}; -#endif - struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root); struct kernfs_open_file { -- cgit v1.2.3-71-gd317 From e9b57aaae605eb869a628fdc21fe7d2c77a2205d Mon Sep 17 00:00:00 2001 From: Jeffle Xu Date: Wed, 9 Feb 2022 14:00:47 +0800 Subject: fscache: export fscache_end_operation() Export fscache_end_operation() to avoid code duplication. 
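For context, a minimal sketch of the begin/end pairing this export completes (illustrative only; cres and cookie stand in for the caller's real objects):

	struct netfs_cache_resources cres = {};
	int ret;

	ret = fscache_begin_read_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	/* ... issue reads against the cache via cres ... */

	fscache_end_operation(&cres);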
Besides, considering the paired fscache_begin_read_operation() is already exported, it shall make sense to also export fscache_end_operation(). Signed-off-by: Jeffle Xu Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/20220302125134.131039-2-jefflexu@linux.alibaba.com/ # Jeffle's v4 Link: https://lore.kernel.org/r/164622971432.3564931.12184135678781328146.stgit@warthog.procyon.org.uk/ # v1 Link: https://lore.kernel.org/r/164678190346.1200972.7453733431978569479.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/164692888334.2099075.5166283293894267365.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/20220316131723.111553-2-jefflexu@linux.alibaba.com/ # v5 --- fs/cifs/fscache.c | 8 -------- fs/fscache/internal.h | 11 ----------- fs/nfs/fscache.c | 8 -------- include/linux/fscache.h | 14 ++++++++++++++ 4 files changed, 14 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index 33af72e0ac0c..b47c2011ce5b 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -134,14 +134,6 @@ void cifs_fscache_release_inode_cookie(struct inode *inode) } } -static inline void fscache_end_operation(struct netfs_cache_resources *cres) -{ - const struct netfs_cache_ops *ops = fscache_operation_valid(cres); - - if (ops) - ops->end_operation(cres); -} - /* * Fallback page reading interface. */ diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index f121c21590dc..ed1c9ed737f2 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -70,17 +70,6 @@ static inline void fscache_see_cookie(struct fscache_cookie *cookie, where); } -/* - * io.c - */ -static inline void fscache_end_operation(struct netfs_cache_resources *cres) -{ - const struct netfs_cache_ops *ops = fscache_operation_valid(cres); - - if (ops) - ops->end_operation(cres); -} - /* * main.c */ diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index cfe901650ab0..39654ca72d3d 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -249,14 +249,6 @@ void nfs_fscache_release_file(struct inode *inode, struct file *filp) } } -static inline void fscache_end_operation(struct netfs_cache_resources *cres) -{ - const struct netfs_cache_ops *ops = fscache_operation_valid(cres); - - if (ops) - ops->end_operation(cres); -} - /* * Fallback page reading interface. */ diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 296c5f1d9f35..d2430da8aa67 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -456,6 +456,20 @@ int fscache_begin_read_operation(struct netfs_cache_resources *cres, return -ENOBUFS; } +/** + * fscache_end_operation - End the read operation for the netfs lib + * @cres: The cache resources for the read operation + * + * Clean up the resources at the end of the read request. + */ +static inline void fscache_end_operation(struct netfs_cache_resources *cres) +{ + const struct netfs_cache_ops *ops = fscache_operation_valid(cres); + + if (ops) + ops->end_operation(cres); +} + /** * fscache_read - Start a read from the cache. * @cres: The cache resources to use -- cgit v1.2.3-71-gd317 From 6a19114b8e7f1e24d80b0812e26d78d7ae1ec6dd Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 17 Feb 2022 10:01:23 +0000 Subject: netfs: Rename netfs_read_*request to netfs_io_*request Rename netfs_read_*request to netfs_io_*request so that the same structures can be used for the write helpers too. 
perl -p -i -e 's/netfs_read_(request|subrequest)/netfs_io_$1/g' \ `git grep -l 'netfs_read_\(sub\|\)request'` perl -p -i -e 's/nr_rd_ops/nr_outstanding/g' \ `git grep -l nr_rd_ops` perl -p -i -e 's/nr_wr_ops/nr_copy_ops/g' \ `git grep -l nr_wr_ops` perl -p -i -e 's/netfs_read_source/netfs_io_source/g' \ `git grep -l 'netfs_read_source'` perl -p -i -e 's/netfs_io_request_ops/netfs_request_ops/g' \ `git grep -l 'netfs_io_request_ops'` perl -p -i -e 's/init_rreq/init_request/g' \ `git grep -l 'init_rreq'` Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/164622988070.3564931.7089670190434315183.stgit@warthog.procyon.org.uk/ # v1 Link: https://lore.kernel.org/r/164678195157.1200972.366609966927368090.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/164692891535.2099075.18435198075367420588.stgit@warthog.procyon.org.uk/ # v3 --- Documentation/filesystems/netfs_library.rst | 40 +++--- fs/9p/vfs_addr.c | 16 +-- fs/afs/file.c | 12 +- fs/afs/internal.h | 4 +- fs/cachefiles/io.c | 6 +- fs/ceph/addr.c | 16 +-- fs/ceph/cache.h | 4 +- fs/netfs/read_helper.c | 194 ++++++++++++++-------------- include/linux/netfs.h | 42 +++--- include/trace/events/cachefiles.h | 6 +- include/trace/events/netfs.h | 14 +- 11 files changed, 177 insertions(+), 177 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index 4f373a8ec47b..a997e2d4321d 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -71,11 +71,11 @@ Read Helper Functions Three read helpers are provided:: void netfs_readahead(struct readahead_control *ractl, - const struct netfs_read_request_ops *ops, + const struct netfs_request_ops *ops, void *netfs_priv); int netfs_readpage(struct file *file, struct folio *folio, - const struct netfs_read_request_ops *ops, + const struct netfs_request_ops *ops, void *netfs_priv); int netfs_write_begin(struct file *file, struct address_space *mapping, @@ -84,7 +84,7 @@ Three read helpers are provided:: unsigned int flags, struct folio **_folio, void **_fsdata, - const struct netfs_read_request_ops *ops, + const struct netfs_request_ops *ops, void *netfs_priv); Each corresponds to a VM operation, with the addition of a couple of parameters @@ -116,7 +116,7 @@ occurs, the request will get partially completed if sufficient data is read. Additionally, there is:: - * void netfs_subreq_terminated(struct netfs_read_subrequest *subreq, + * void netfs_subreq_terminated(struct netfs_io_subrequest *subreq, ssize_t transferred_or_error, bool was_async); @@ -132,7 +132,7 @@ Read Helper Structures The read helpers make use of a couple of structures to maintain the state of the read. The first is a structure that manages a read request as a whole:: - struct netfs_read_request { + struct netfs_io_request { struct inode *inode; struct address_space *mapping; struct netfs_cache_resources cache_resources; @@ -140,7 +140,7 @@ the read. The first is a structure that manages a read request as a whole:: loff_t start; size_t len; loff_t i_size; - const struct netfs_read_request_ops *netfs_ops; + const struct netfs_request_ops *netfs_ops; unsigned int debug_id; ... }; @@ -187,8 +187,8 @@ The above fields are the ones the netfs can use. 
They are: The second structure is used to manage individual slices of the overall read request:: - struct netfs_read_subrequest { - struct netfs_read_request *rreq; + struct netfs_io_subrequest { + struct netfs_io_request *rreq; loff_t start; size_t len; size_t transferred; @@ -244,23 +244,23 @@ Read Helper Operations The network filesystem must provide the read helpers with a table of operations through which it can issue requests and negotiate:: - struct netfs_read_request_ops { - void (*init_rreq)(struct netfs_read_request *rreq, struct file *file); + struct netfs_request_ops { + void (*init_request)(struct netfs_io_request *rreq, struct file *file); bool (*is_cache_enabled)(struct inode *inode); - int (*begin_cache_operation)(struct netfs_read_request *rreq); - void (*expand_readahead)(struct netfs_read_request *rreq); - bool (*clamp_length)(struct netfs_read_subrequest *subreq); - void (*issue_op)(struct netfs_read_subrequest *subreq); - bool (*is_still_valid)(struct netfs_read_request *rreq); + int (*begin_cache_operation)(struct netfs_io_request *rreq); + void (*expand_readahead)(struct netfs_io_request *rreq); + bool (*clamp_length)(struct netfs_io_subrequest *subreq); + void (*issue_op)(struct netfs_io_subrequest *subreq); + bool (*is_still_valid)(struct netfs_io_request *rreq); int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, struct folio *folio, void **_fsdata); - void (*done)(struct netfs_read_request *rreq); + void (*done)(struct netfs_io_request *rreq); void (*cleanup)(struct address_space *mapping, void *netfs_priv); }; The operations are as follows: - * ``init_rreq()`` + * ``init_request()`` [Optional] This is called to initialise the request structure. It is given the file for reference and can modify the ->netfs_priv value. @@ -420,12 +420,12 @@ The network filesystem's ->begin_cache_operation() method is called to set up a cache and this must call into the cache to do the work. If using fscache, for example, the cache would call:: - int fscache_begin_read_operation(struct netfs_read_request *rreq, + int fscache_begin_read_operation(struct netfs_io_request *rreq, struct fscache_cookie *cookie); passing in the request pointer and the cookie corresponding to the file. 
-The netfs_read_request object contains a place for the cache to hang its +The netfs_io_request object contains a place for the cache to hang its state:: struct netfs_cache_resources { @@ -443,7 +443,7 @@ operation table looks like the following:: void (*expand_readahead)(struct netfs_cache_resources *cres, loff_t *_start, size_t *_len, loff_t i_size); - enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq, + enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, loff_t i_size); int (*read)(struct netfs_cache_resources *cres, diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 9a10e68c5f30..7b79fabe7593 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -31,9 +31,9 @@ * v9fs_req_issue_op - Issue a read from 9P * @subreq: The read to make */ -static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq) +static void v9fs_req_issue_op(struct netfs_io_subrequest *subreq) { - struct netfs_read_request *rreq = subreq->rreq; + struct netfs_io_request *rreq = subreq->rreq; struct p9_fid *fid = rreq->netfs_priv; struct iov_iter to; loff_t pos = subreq->start + subreq->transferred; @@ -52,11 +52,11 @@ static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq) } /** - * v9fs_init_rreq - Initialise a read request + * v9fs_init_request - Initialise a read request * @rreq: The read request * @file: The file being read from */ -static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file) +static void v9fs_init_request(struct netfs_io_request *rreq, struct file *file) { struct p9_fid *fid = file->private_data; @@ -65,7 +65,7 @@ static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file) } /** - * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq + * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_request * @mapping: unused mapping of request to cleanup * @priv: private data to cleanup, a fid, guaranted non-null. 
*/ @@ -91,7 +91,7 @@ static bool v9fs_is_cache_enabled(struct inode *inode) * v9fs_begin_cache_operation - Begin a cache operation for a read * @rreq: The read request */ -static int v9fs_begin_cache_operation(struct netfs_read_request *rreq) +static int v9fs_begin_cache_operation(struct netfs_io_request *rreq) { #ifdef CONFIG_9P_FSCACHE struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode)); @@ -102,8 +102,8 @@ static int v9fs_begin_cache_operation(struct netfs_read_request *rreq) #endif } -static const struct netfs_read_request_ops v9fs_req_ops = { - .init_rreq = v9fs_init_rreq, +static const struct netfs_request_ops v9fs_req_ops = { + .init_request = v9fs_init_request, .is_cache_enabled = v9fs_is_cache_enabled, .begin_cache_operation = v9fs_begin_cache_operation, .issue_op = v9fs_req_issue_op, diff --git a/fs/afs/file.c b/fs/afs/file.c index 720818a7c166..e55761f8858c 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -240,7 +240,7 @@ void afs_put_read(struct afs_read *req) static void afs_fetch_data_notify(struct afs_operation *op) { struct afs_read *req = op->fetch.req; - struct netfs_read_subrequest *subreq = req->subreq; + struct netfs_io_subrequest *subreq = req->subreq; int error = op->error; if (error == -ECONNABORTED) @@ -310,7 +310,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req) return afs_do_sync_operation(op); } -static void afs_req_issue_op(struct netfs_read_subrequest *subreq) +static void afs_req_issue_op(struct netfs_io_subrequest *subreq) { struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode); struct afs_read *fsreq; @@ -359,7 +359,7 @@ static int afs_symlink_readpage(struct file *file, struct page *page) return ret; } -static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file) +static void afs_init_request(struct netfs_io_request *rreq, struct file *file) { rreq->netfs_priv = key_get(afs_file_key(file)); } @@ -371,7 +371,7 @@ static bool afs_is_cache_enabled(struct inode *inode) return fscache_cookie_enabled(cookie) && cookie->cache_priv; } -static int afs_begin_cache_operation(struct netfs_read_request *rreq) +static int afs_begin_cache_operation(struct netfs_io_request *rreq) { #ifdef CONFIG_AFS_FSCACHE struct afs_vnode *vnode = AFS_FS_I(rreq->inode); @@ -396,8 +396,8 @@ static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv) key_put(netfs_priv); } -const struct netfs_read_request_ops afs_req_ops = { - .init_rreq = afs_init_rreq, +const struct netfs_request_ops afs_req_ops = { + .init_request = afs_init_request, .is_cache_enabled = afs_is_cache_enabled, .begin_cache_operation = afs_begin_cache_operation, .check_write_begin = afs_check_write_begin, diff --git a/fs/afs/internal.h b/fs/afs/internal.h index b6f02321fc09..c56a0e1719ae 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -207,7 +207,7 @@ struct afs_read { loff_t file_size; /* File size returned by server */ struct key *key; /* The key to use to reissue the read */ struct afs_vnode *vnode; /* The file being read into. 
*/ - struct netfs_read_subrequest *subreq; /* Fscache helper read request this belongs to */ + struct netfs_io_subrequest *subreq; /* Fscache helper read request this belongs to */ afs_dataversion_t data_version; /* Version number returned by server */ refcount_t usage; unsigned int call_debug_id; @@ -1063,7 +1063,7 @@ extern const struct address_space_operations afs_file_aops; extern const struct address_space_operations afs_symlink_aops; extern const struct inode_operations afs_file_inode_operations; extern const struct file_operations afs_file_operations; -extern const struct netfs_read_request_ops afs_req_ops; +extern const struct netfs_request_ops afs_req_ops; extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *); extern void afs_put_wb_key(struct afs_wb_key *); diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index 753986ea1583..6ac6fdbc70d3 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -382,18 +382,18 @@ presubmission_error: * Prepare a read operation, shortening it to a cached/uncached * boundary as appropriate. */ -static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subrequest *subreq, +static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq, loff_t i_size) { enum cachefiles_prepare_read_trace why; - struct netfs_read_request *rreq = subreq->rreq; + struct netfs_io_request *rreq = subreq->rreq; struct netfs_cache_resources *cres = &rreq->cache_resources; struct cachefiles_object *object; struct cachefiles_cache *cache; struct fscache_cookie *cookie = fscache_cres_cookie(cres); const struct cred *saved_cred; struct file *file = cachefiles_cres_file(cres); - enum netfs_read_source ret = NETFS_DOWNLOAD_FROM_SERVER; + enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER; loff_t off, to; ino_t ino = file ? 
file_inode(file)->i_ino : 0; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 46e0881ae8b2..9d995f351079 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -183,7 +183,7 @@ static int ceph_releasepage(struct page *page, gfp_t gfp) return 1; } -static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq) +static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq) { struct inode *inode = rreq->inode; struct ceph_inode_info *ci = ceph_inode(inode); @@ -200,7 +200,7 @@ static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq) rreq->len = roundup(rreq->len, lo->stripe_unit); } -static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq) +static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq) { struct inode *inode = subreq->rreq->inode; struct ceph_fs_client *fsc = ceph_inode_to_client(inode); @@ -219,7 +219,7 @@ static void finish_netfs_read(struct ceph_osd_request *req) { struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode); struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); - struct netfs_read_subrequest *subreq = req->r_priv; + struct netfs_io_subrequest *subreq = req->r_priv; int num_pages; int err = req->r_result; @@ -245,9 +245,9 @@ static void finish_netfs_read(struct ceph_osd_request *req) iput(req->r_inode); } -static bool ceph_netfs_issue_op_inline(struct netfs_read_subrequest *subreq) +static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq) { - struct netfs_read_request *rreq = subreq->rreq; + struct netfs_io_request *rreq = subreq->rreq; struct inode *inode = rreq->inode; struct ceph_mds_reply_info_parsed *rinfo; struct ceph_mds_reply_info_in *iinfo; @@ -298,9 +298,9 @@ out: return true; } -static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq) +static void ceph_netfs_issue_op(struct netfs_io_subrequest *subreq) { - struct netfs_read_request *rreq = subreq->rreq; + struct netfs_io_request *rreq = subreq->rreq; struct inode *inode = rreq->inode; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); @@ -364,7 +364,7 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv) ceph_put_cap_refs(ci, got); } -static const struct netfs_read_request_ops ceph_netfs_read_ops = { +static const struct netfs_request_ops ceph_netfs_read_ops = { .is_cache_enabled = ceph_is_cache_enabled, .begin_cache_operation = ceph_begin_cache_operation, .issue_op = ceph_netfs_issue_op, diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h index 09164389fa66..b8b3b5cb6438 100644 --- a/fs/ceph/cache.h +++ b/fs/ceph/cache.h @@ -62,7 +62,7 @@ static inline int ceph_fscache_set_page_dirty(struct page *page) return fscache_set_page_dirty(page, ceph_fscache_cookie(ci)); } -static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq) +static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq) { struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode)); @@ -143,7 +143,7 @@ static inline bool ceph_is_cache_enabled(struct inode *inode) return false; } -static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq) +static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq) { return -ENOBUFS; } diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 501da990c259..50035d93f1dc 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -28,23 +28,23 @@ module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO); 
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask"); static void netfs_rreq_work(struct work_struct *); -static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool); +static void __netfs_put_subrequest(struct netfs_io_subrequest *, bool); -static void netfs_put_subrequest(struct netfs_read_subrequest *subreq, +static void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async) { if (refcount_dec_and_test(&subreq->usage)) __netfs_put_subrequest(subreq, was_async); } -static struct netfs_read_request *netfs_alloc_read_request( - const struct netfs_read_request_ops *ops, void *netfs_priv, +static struct netfs_io_request *netfs_alloc_read_request( + const struct netfs_request_ops *ops, void *netfs_priv, struct file *file) { static atomic_t debug_ids; - struct netfs_read_request *rreq; + struct netfs_io_request *rreq; - rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL); + rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL); if (rreq) { rreq->netfs_ops = ops; rreq->netfs_priv = netfs_priv; @@ -55,27 +55,27 @@ static struct netfs_read_request *netfs_alloc_read_request( INIT_WORK(&rreq->work, netfs_rreq_work); refcount_set(&rreq->usage, 1); __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); - if (ops->init_rreq) - ops->init_rreq(rreq, file); + if (ops->init_request) + ops->init_request(rreq, file); netfs_stat(&netfs_n_rh_rreq); } return rreq; } -static void netfs_get_read_request(struct netfs_read_request *rreq) +static void netfs_get_read_request(struct netfs_io_request *rreq) { refcount_inc(&rreq->usage); } -static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq, +static void netfs_rreq_clear_subreqs(struct netfs_io_request *rreq, bool was_async) { - struct netfs_read_subrequest *subreq; + struct netfs_io_subrequest *subreq; while (!list_empty(&rreq->subrequests)) { subreq = list_first_entry(&rreq->subrequests, - struct netfs_read_subrequest, rreq_link); + struct netfs_io_subrequest, rreq_link); list_del(&subreq->rreq_link); netfs_put_subrequest(subreq, was_async); } @@ -83,8 +83,8 @@ static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq, static void netfs_free_read_request(struct work_struct *work) { - struct netfs_read_request *rreq = - container_of(work, struct netfs_read_request, work); + struct netfs_io_request *rreq = + container_of(work, struct netfs_io_request, work); netfs_rreq_clear_subreqs(rreq, false); if (rreq->netfs_priv) rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv); @@ -95,7 +95,7 @@ static void netfs_free_read_request(struct work_struct *work) netfs_stat_d(&netfs_n_rh_rreq); } -static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async) +static void netfs_put_read_request(struct netfs_io_request *rreq, bool was_async) { if (refcount_dec_and_test(&rreq->usage)) { if (was_async) { @@ -111,12 +111,12 @@ static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_asy /* * Allocate and partially initialise an I/O request structure. 
*/ -static struct netfs_read_subrequest *netfs_alloc_subrequest( - struct netfs_read_request *rreq) +static struct netfs_io_subrequest *netfs_alloc_subrequest( + struct netfs_io_request *rreq) { - struct netfs_read_subrequest *subreq; + struct netfs_io_subrequest *subreq; - subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL); + subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL); if (subreq) { INIT_LIST_HEAD(&subreq->rreq_link); refcount_set(&subreq->usage, 2); @@ -128,15 +128,15 @@ static struct netfs_read_subrequest *netfs_alloc_subrequest( return subreq; } -static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq) +static void netfs_get_read_subrequest(struct netfs_io_subrequest *subreq) { refcount_inc(&subreq->usage); } -static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq, +static void __netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async) { - struct netfs_read_request *rreq = subreq->rreq; + struct netfs_io_request *rreq = subreq->rreq; trace_netfs_sreq(subreq, netfs_sreq_trace_free); kfree(subreq); @@ -147,7 +147,7 @@ static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq, /* * Clear the unread part of an I/O request. */ -static void netfs_clear_unread(struct netfs_read_subrequest *subreq) +static void netfs_clear_unread(struct netfs_io_subrequest *subreq) { struct iov_iter iter; @@ -160,7 +160,7 @@ static void netfs_clear_unread(struct netfs_read_subrequest *subreq) static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async) { - struct netfs_read_subrequest *subreq = priv; + struct netfs_io_subrequest *subreq = priv; netfs_subreq_terminated(subreq, transferred_or_error, was_async); } @@ -169,8 +169,8 @@ static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error * Issue a read against the cache. * - Eats the caller's ref on subreq. */ -static void netfs_read_from_cache(struct netfs_read_request *rreq, - struct netfs_read_subrequest *subreq, +static void netfs_read_from_cache(struct netfs_io_request *rreq, + struct netfs_io_subrequest *subreq, enum netfs_read_from_hole read_hole) { struct netfs_cache_resources *cres = &rreq->cache_resources; @@ -188,8 +188,8 @@ static void netfs_read_from_cache(struct netfs_read_request *rreq, /* * Fill a subrequest region with zeroes. */ -static void netfs_fill_with_zeroes(struct netfs_read_request *rreq, - struct netfs_read_subrequest *subreq) +static void netfs_fill_with_zeroes(struct netfs_io_request *rreq, + struct netfs_io_subrequest *subreq) { netfs_stat(&netfs_n_rh_zero); __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); @@ -212,8 +212,8 @@ static void netfs_fill_with_zeroes(struct netfs_read_request *rreq, * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be * cleared. */ -static void netfs_read_from_server(struct netfs_read_request *rreq, - struct netfs_read_subrequest *subreq) +static void netfs_read_from_server(struct netfs_io_request *rreq, + struct netfs_io_subrequest *subreq) { netfs_stat(&netfs_n_rh_download); rreq->netfs_ops->issue_op(subreq); @@ -222,7 +222,7 @@ static void netfs_read_from_server(struct netfs_read_request *rreq, /* * Release those waiting. 
*/ -static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async) +static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async) { trace_netfs_rreq(rreq, netfs_rreq_trace_done); netfs_rreq_clear_subreqs(rreq, was_async); @@ -235,10 +235,10 @@ static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async * * May be called in softirq mode and we inherit a ref from the caller. */ -static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq, +static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq, bool was_async) { - struct netfs_read_subrequest *subreq; + struct netfs_io_subrequest *subreq; struct folio *folio; pgoff_t unlocked = 0; bool have_unlocked = false; @@ -267,8 +267,8 @@ static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq, static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, bool was_async) { - struct netfs_read_subrequest *subreq = priv; - struct netfs_read_request *rreq = subreq->rreq; + struct netfs_io_subrequest *subreq = priv; + struct netfs_io_request *rreq = subreq->rreq; if (IS_ERR_VALUE(transferred_or_error)) { netfs_stat(&netfs_n_rh_write_failed); @@ -280,8 +280,8 @@ static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, trace_netfs_sreq(subreq, netfs_sreq_trace_write_term); - /* If we decrement nr_wr_ops to 0, the ref belongs to us. */ - if (atomic_dec_and_test(&rreq->nr_wr_ops)) + /* If we decrement nr_copy_ops to 0, the ref belongs to us. */ + if (atomic_dec_and_test(&rreq->nr_copy_ops)) netfs_rreq_unmark_after_write(rreq, was_async); netfs_put_subrequest(subreq, was_async); @@ -291,10 +291,10 @@ static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, * Perform any outstanding writes to the cache. We inherit a ref from the * caller. */ -static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq) +static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) { struct netfs_cache_resources *cres = &rreq->cache_resources; - struct netfs_read_subrequest *subreq, *next, *p; + struct netfs_io_subrequest *subreq, *next, *p; struct iov_iter iter; int ret; @@ -303,7 +303,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq) /* We don't want terminating writes trying to wake us up whilst we're * still going through the list. */ - atomic_inc(&rreq->nr_wr_ops); + atomic_inc(&rreq->nr_copy_ops); list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) { if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) { @@ -334,7 +334,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq) iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages, subreq->start, subreq->len); - atomic_inc(&rreq->nr_wr_ops); + atomic_inc(&rreq->nr_copy_ops); netfs_stat(&netfs_n_rh_write); netfs_get_read_subrequest(subreq); trace_netfs_sreq(subreq, netfs_sreq_trace_write); @@ -342,20 +342,20 @@ static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq) netfs_rreq_copy_terminated, subreq); } - /* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */ - if (atomic_dec_and_test(&rreq->nr_wr_ops)) + /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. 
*/ + if (atomic_dec_and_test(&rreq->nr_copy_ops)) netfs_rreq_unmark_after_write(rreq, false); } static void netfs_rreq_write_to_cache_work(struct work_struct *work) { - struct netfs_read_request *rreq = - container_of(work, struct netfs_read_request, work); + struct netfs_io_request *rreq = + container_of(work, struct netfs_io_request, work); netfs_rreq_do_write_to_cache(rreq); } -static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq) +static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) { rreq->work.func = netfs_rreq_write_to_cache_work; if (!queue_work(system_unbound_wq, &rreq->work)) @@ -366,9 +366,9 @@ static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq) * Unlock the folios in a read operation. We need to set PG_fscache on any * folios we're going to write back before we unlock them. */ -static void netfs_rreq_unlock(struct netfs_read_request *rreq) +static void netfs_rreq_unlock(struct netfs_io_request *rreq) { - struct netfs_read_subrequest *subreq; + struct netfs_io_subrequest *subreq; struct folio *folio; unsigned int iopos, account = 0; pgoff_t start_page = rreq->start / PAGE_SIZE; @@ -391,7 +391,7 @@ static void netfs_rreq_unlock(struct netfs_read_request *rreq) * mixture inside. */ subreq = list_first_entry(&rreq->subrequests, - struct netfs_read_subrequest, rreq_link); + struct netfs_io_subrequest, rreq_link); iopos = 0; subreq_failed = (subreq->error < 0); @@ -450,8 +450,8 @@ static void netfs_rreq_unlock(struct netfs_read_request *rreq) /* * Handle a short read. */ -static void netfs_rreq_short_read(struct netfs_read_request *rreq, - struct netfs_read_subrequest *subreq) +static void netfs_rreq_short_read(struct netfs_io_request *rreq, + struct netfs_io_subrequest *subreq) { __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags); __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags); @@ -460,7 +460,7 @@ static void netfs_rreq_short_read(struct netfs_read_request *rreq, trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short); netfs_get_read_subrequest(subreq); - atomic_inc(&rreq->nr_rd_ops); + atomic_inc(&rreq->nr_outstanding); if (subreq->source == NETFS_READ_FROM_CACHE) netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR); else @@ -471,9 +471,9 @@ static void netfs_rreq_short_read(struct netfs_read_request *rreq, * Resubmit any short or failed operations. Returns true if we got the rreq * ref back. */ -static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq) +static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq) { - struct netfs_read_subrequest *subreq; + struct netfs_io_subrequest *subreq; WARN_ON(in_interrupt()); @@ -482,7 +482,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq) /* We don't want terminating submissions trying to wake us up whilst * we're still going through the list. 
*/ - atomic_inc(&rreq->nr_rd_ops); + atomic_inc(&rreq->nr_outstanding); __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags); list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { @@ -494,27 +494,27 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq) netfs_stat(&netfs_n_rh_download_instead); trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead); netfs_get_read_subrequest(subreq); - atomic_inc(&rreq->nr_rd_ops); + atomic_inc(&rreq->nr_outstanding); netfs_read_from_server(rreq, subreq); } else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) { netfs_rreq_short_read(rreq, subreq); } } - /* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */ - if (atomic_dec_and_test(&rreq->nr_rd_ops)) + /* If we decrement nr_outstanding to 0, the usage ref belongs to us. */ + if (atomic_dec_and_test(&rreq->nr_outstanding)) return true; - wake_up_var(&rreq->nr_rd_ops); + wake_up_var(&rreq->nr_outstanding); return false; } /* * Check to see if the data read is still valid. */ -static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq) +static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq) { - struct netfs_read_subrequest *subreq; + struct netfs_io_subrequest *subreq; if (!rreq->netfs_ops->is_still_valid || rreq->netfs_ops->is_still_valid(rreq)) @@ -534,7 +534,7 @@ static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq) * Note that we could be in an ordinary kernel thread, on a workqueue or in * softirq context at this point. We inherit a ref from the caller. */ -static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async) +static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async) { trace_netfs_rreq(rreq, netfs_rreq_trace_assess); @@ -561,8 +561,8 @@ again: static void netfs_rreq_work(struct work_struct *work) { - struct netfs_read_request *rreq = - container_of(work, struct netfs_read_request, work); + struct netfs_io_request *rreq = + container_of(work, struct netfs_io_request, work); netfs_rreq_assess(rreq, false); } @@ -570,7 +570,7 @@ static void netfs_rreq_work(struct work_struct *work) * Handle the completion of all outstanding I/O operations on a read request. * We inherit a ref from the caller. */ -static void netfs_rreq_terminated(struct netfs_read_request *rreq, +static void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async) { if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) && @@ -600,11 +600,11 @@ static void netfs_rreq_terminated(struct netfs_read_request *rreq, * If @was_async is true, the caller might be running in softirq or interrupt * context and we can't sleep. */ -void netfs_subreq_terminated(struct netfs_read_subrequest *subreq, +void netfs_subreq_terminated(struct netfs_io_subrequest *subreq, ssize_t transferred_or_error, bool was_async) { - struct netfs_read_request *rreq = subreq->rreq; + struct netfs_io_request *rreq = subreq->rreq; int u; _enter("[%u]{%llx,%lx},%zd", @@ -648,12 +648,12 @@ complete: out: trace_netfs_sreq(subreq, netfs_sreq_trace_terminated); - /* If we decrement nr_rd_ops to 0, the ref belongs to us. */ - u = atomic_dec_return(&rreq->nr_rd_ops); + /* If we decrement nr_outstanding to 0, the ref belongs to us. 
*/ + u = atomic_dec_return(&rreq->nr_outstanding); if (u == 0) netfs_rreq_terminated(rreq, was_async); else if (u == 1) - wake_up_var(&rreq->nr_rd_ops); + wake_up_var(&rreq->nr_outstanding); netfs_put_subrequest(subreq, was_async); return; @@ -691,10 +691,10 @@ failed: } EXPORT_SYMBOL(netfs_subreq_terminated); -static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq, +static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq, loff_t i_size) { - struct netfs_read_request *rreq = subreq->rreq; + struct netfs_io_request *rreq = subreq->rreq; struct netfs_cache_resources *cres = &rreq->cache_resources; if (cres->ops) @@ -707,11 +707,11 @@ static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequ /* * Work out what sort of subrequest the next one will be. */ -static enum netfs_read_source -netfs_rreq_prepare_read(struct netfs_read_request *rreq, - struct netfs_read_subrequest *subreq) +static enum netfs_io_source +netfs_rreq_prepare_read(struct netfs_io_request *rreq, + struct netfs_io_subrequest *subreq) { - enum netfs_read_source source; + enum netfs_io_source source; _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size); @@ -748,11 +748,11 @@ out: /* * Slice off a piece of a read request and submit an I/O request for it. */ -static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq, +static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq, unsigned int *_debug_index) { - struct netfs_read_subrequest *subreq; - enum netfs_read_source source; + struct netfs_io_subrequest *subreq; + enum netfs_io_source source; subreq = netfs_alloc_subrequest(rreq); if (!subreq) @@ -777,7 +777,7 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq, if (source == NETFS_INVALID_READ) goto subreq_failed; - atomic_inc(&rreq->nr_rd_ops); + atomic_inc(&rreq->nr_outstanding); rreq->submitted += subreq->len; @@ -804,7 +804,7 @@ subreq_failed: return false; } -static void netfs_cache_expand_readahead(struct netfs_read_request *rreq, +static void netfs_cache_expand_readahead(struct netfs_io_request *rreq, loff_t *_start, size_t *_len, loff_t i_size) { struct netfs_cache_resources *cres = &rreq->cache_resources; @@ -813,7 +813,7 @@ static void netfs_cache_expand_readahead(struct netfs_read_request *rreq, cres->ops->expand_readahead(cres, _start, _len, i_size); } -static void netfs_rreq_expand(struct netfs_read_request *rreq, +static void netfs_rreq_expand(struct netfs_io_request *rreq, struct readahead_control *ractl) { /* Give the cache a chance to change the request parameters. The @@ -866,10 +866,10 @@ static void netfs_rreq_expand(struct netfs_read_request *rreq, * This is usable whether or not caching is enabled. */ void netfs_readahead(struct readahead_control *ractl, - const struct netfs_read_request_ops *ops, + const struct netfs_request_ops *ops, void *netfs_priv) { - struct netfs_read_request *rreq; + struct netfs_io_request *rreq; unsigned int debug_index = 0; int ret; @@ -897,7 +897,7 @@ void netfs_readahead(struct readahead_control *ractl, netfs_rreq_expand(rreq, ractl); - atomic_set(&rreq->nr_rd_ops, 1); + atomic_set(&rreq->nr_outstanding, 1); do { if (!netfs_rreq_submit_slice(rreq, &debug_index)) break; @@ -910,8 +910,8 @@ void netfs_readahead(struct readahead_control *ractl, while (readahead_folio(ractl)) ; - /* If we decrement nr_rd_ops to 0, the ref belongs to us. 
*/ - if (atomic_dec_and_test(&rreq->nr_rd_ops)) + /* If we decrement nr_outstanding to 0, the ref belongs to us. */ + if (atomic_dec_and_test(&rreq->nr_outstanding)) netfs_rreq_assess(rreq, false); return; @@ -944,10 +944,10 @@ EXPORT_SYMBOL(netfs_readahead); */ int netfs_readpage(struct file *file, struct folio *folio, - const struct netfs_read_request_ops *ops, + const struct netfs_request_ops *ops, void *netfs_priv) { - struct netfs_read_request *rreq; + struct netfs_io_request *rreq; unsigned int debug_index = 0; int ret; @@ -977,19 +977,19 @@ int netfs_readpage(struct file *file, netfs_get_read_request(rreq); - atomic_set(&rreq->nr_rd_ops, 1); + atomic_set(&rreq->nr_outstanding, 1); do { if (!netfs_rreq_submit_slice(rreq, &debug_index)) break; } while (rreq->submitted < rreq->len); - /* Keep nr_rd_ops incremented so that the ref always belongs to us, and + /* Keep nr_outstanding incremented so that the ref always belongs to us, and * the service code isn't punted off to a random thread pool to * process. */ do { - wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1); + wait_var_event(&rreq->nr_outstanding, atomic_read(&rreq->nr_outstanding) == 1); netfs_rreq_assess(rreq, false); } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)); @@ -1076,10 +1076,10 @@ zero_out: int netfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned int len, unsigned int aop_flags, struct folio **_folio, void **_fsdata, - const struct netfs_read_request_ops *ops, + const struct netfs_request_ops *ops, void *netfs_priv) { - struct netfs_read_request *rreq; + struct netfs_io_request *rreq; struct folio *folio; struct inode *inode = file_inode(file); unsigned int debug_index = 0, fgp_flags; @@ -1153,19 +1153,19 @@ retry: while (readahead_folio(&ractl)) ; - atomic_set(&rreq->nr_rd_ops, 1); + atomic_set(&rreq->nr_outstanding, 1); do { if (!netfs_rreq_submit_slice(rreq, &debug_index)) break; } while (rreq->submitted < rreq->len); - /* Keep nr_rd_ops incremented so that the ref always belongs to us, and + /* Keep nr_outstanding incremented so that the ref always belongs to us, and * the service code isn't punted off to a random thread pool to * process. */ for (;;) { - wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1); + wait_var_event(&rreq->nr_outstanding, atomic_read(&rreq->nr_outstanding) == 1); netfs_rreq_assess(rreq, false); if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) break; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 614f22213e21..a2ca91cb7a68 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -106,7 +106,7 @@ static inline int wait_on_page_fscache_killable(struct page *page) return folio_wait_private_2_killable(page_folio(page)); } -enum netfs_read_source { +enum netfs_io_source { NETFS_FILL_WITH_ZEROES, NETFS_DOWNLOAD_FROM_SERVER, NETFS_READ_FROM_CACHE, @@ -130,8 +130,8 @@ struct netfs_cache_resources { /* * Descriptor for a single component subrequest. 
*/ -struct netfs_read_subrequest { - struct netfs_read_request *rreq; /* Supervising read request */ +struct netfs_io_subrequest { + struct netfs_io_request *rreq; /* Supervising read request */ struct list_head rreq_link; /* Link in rreq->subrequests */ loff_t start; /* Where to start the I/O */ size_t len; /* Size of the I/O */ @@ -139,7 +139,7 @@ struct netfs_read_subrequest { refcount_t usage; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ - enum netfs_read_source source; /* Where to read from */ + enum netfs_io_source source; /* Where to read from */ unsigned long flags; #define NETFS_SREQ_WRITE_TO_CACHE 0 /* Set if should write to cache */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ @@ -152,7 +152,7 @@ struct netfs_read_subrequest { * Descriptor for a read helper request. This is used to make multiple I/O * requests on a variety of sources and then stitch the result together. */ -struct netfs_read_request { +struct netfs_io_request { struct work_struct work; struct inode *inode; /* The file being accessed */ struct address_space *mapping; /* The mapping being accessed */ @@ -160,8 +160,8 @@ struct netfs_read_request { struct list_head subrequests; /* Requests to fetch I/O from disk or net */ void *netfs_priv; /* Private data for the netfs */ unsigned int debug_id; - atomic_t nr_rd_ops; /* Number of read ops in progress */ - atomic_t nr_wr_ops; /* Number of write ops in progress */ + atomic_t nr_outstanding; /* Number of read ops in progress */ + atomic_t nr_copy_ops; /* Number of write ops in progress */ size_t submitted; /* Amount submitted for I/O so far */ size_t len; /* Length of the request */ short error; /* 0 or error that occurred */ @@ -176,23 +176,23 @@ struct netfs_read_request { #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */ #define NETFS_RREQ_FAILED 4 /* The request failed */ #define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */ - const struct netfs_read_request_ops *netfs_ops; + const struct netfs_request_ops *netfs_ops; }; /* * Operations the network filesystem can/must provide to the helpers. */ -struct netfs_read_request_ops { +struct netfs_request_ops { bool (*is_cache_enabled)(struct inode *inode); - void (*init_rreq)(struct netfs_read_request *rreq, struct file *file); - int (*begin_cache_operation)(struct netfs_read_request *rreq); - void (*expand_readahead)(struct netfs_read_request *rreq); - bool (*clamp_length)(struct netfs_read_subrequest *subreq); - void (*issue_op)(struct netfs_read_subrequest *subreq); - bool (*is_still_valid)(struct netfs_read_request *rreq); + void (*init_request)(struct netfs_io_request *rreq, struct file *file); + int (*begin_cache_operation)(struct netfs_io_request *rreq); + void (*expand_readahead)(struct netfs_io_request *rreq); + bool (*clamp_length)(struct netfs_io_subrequest *subreq); + void (*issue_op)(struct netfs_io_subrequest *subreq); + bool (*is_still_valid)(struct netfs_io_request *rreq); int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, struct folio *folio, void **_fsdata); - void (*done)(struct netfs_read_request *rreq); + void (*done)(struct netfs_io_request *rreq); void (*cleanup)(struct address_space *mapping, void *netfs_priv); }; @@ -235,7 +235,7 @@ struct netfs_cache_ops { /* Prepare a read operation, shortening it to a cached/uncached * boundary as appropriate. 
*/ - enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq, + enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, loff_t i_size); /* Prepare a write operation, working out what part of the write we can @@ -255,19 +255,19 @@ struct netfs_cache_ops { struct readahead_control; extern void netfs_readahead(struct readahead_control *, - const struct netfs_read_request_ops *, + const struct netfs_request_ops *, void *); extern int netfs_readpage(struct file *, struct folio *, - const struct netfs_read_request_ops *, + const struct netfs_request_ops *, void *); extern int netfs_write_begin(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct folio **, void **, - const struct netfs_read_request_ops *, + const struct netfs_request_ops *, void *); -extern void netfs_subreq_terminated(struct netfs_read_subrequest *, ssize_t, bool); +extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); extern void netfs_stats_show(struct seq_file *); #endif /* _LINUX_NETFS_H */ diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h index c6f5aa74db89..002d0ae4f9bc 100644 --- a/include/trace/events/cachefiles.h +++ b/include/trace/events/cachefiles.h @@ -424,8 +424,8 @@ TRACE_EVENT(cachefiles_vol_coherency, ); TRACE_EVENT(cachefiles_prep_read, - TP_PROTO(struct netfs_read_subrequest *sreq, - enum netfs_read_source source, + TP_PROTO(struct netfs_io_subrequest *sreq, + enum netfs_io_source source, enum cachefiles_prepare_read_trace why, ino_t cache_inode), @@ -435,7 +435,7 @@ TRACE_EVENT(cachefiles_prep_read, __field(unsigned int, rreq ) __field(unsigned short, index ) __field(unsigned short, flags ) - __field(enum netfs_read_source, source ) + __field(enum netfs_io_source, source ) __field(enum cachefiles_prepare_read_trace, why ) __field(size_t, len ) __field(loff_t, start ) diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 4d0bf02d490a..b40809c0bd74 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -94,7 +94,7 @@ netfs_failures; #define E_(a, b) { a, b } TRACE_EVENT(netfs_read, - TP_PROTO(struct netfs_read_request *rreq, + TP_PROTO(struct netfs_io_request *rreq, loff_t start, size_t len, enum netfs_read_trace what), @@ -127,7 +127,7 @@ TRACE_EVENT(netfs_read, ); TRACE_EVENT(netfs_rreq, - TP_PROTO(struct netfs_read_request *rreq, + TP_PROTO(struct netfs_io_request *rreq, enum netfs_rreq_trace what), TP_ARGS(rreq, what), @@ -151,7 +151,7 @@ TRACE_EVENT(netfs_rreq, ); TRACE_EVENT(netfs_sreq, - TP_PROTO(struct netfs_read_subrequest *sreq, + TP_PROTO(struct netfs_io_subrequest *sreq, enum netfs_sreq_trace what), TP_ARGS(sreq, what), @@ -161,7 +161,7 @@ TRACE_EVENT(netfs_sreq, __field(unsigned short, index ) __field(short, error ) __field(unsigned short, flags ) - __field(enum netfs_read_source, source ) + __field(enum netfs_io_source, source ) __field(enum netfs_sreq_trace, what ) __field(size_t, len ) __field(size_t, transferred ) @@ -190,8 +190,8 @@ TRACE_EVENT(netfs_sreq, ); TRACE_EVENT(netfs_failure, - TP_PROTO(struct netfs_read_request *rreq, - struct netfs_read_subrequest *sreq, + TP_PROTO(struct netfs_io_request *rreq, + struct netfs_io_subrequest *sreq, int error, enum netfs_failure what), TP_ARGS(rreq, sreq, error, what), @@ -201,7 +201,7 @@ TRACE_EVENT(netfs_failure, __field(unsigned short, index ) __field(short, error ) __field(unsigned short, flags ) - __field(enum netfs_read_source, source ) + __field(enum netfs_io_source, 
source ) __field(enum netfs_failure, what ) __field(size_t, len ) __field(size_t, transferred ) -- cgit v1.2.3-71-gd317 From f18a378580a761c8559b7d90afaa157269559c05 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 17 Feb 2022 10:14:32 +0000 Subject: netfs: Finish off rename of netfs_read_request to netfs_io_request Adjust helper function names and comments after mass rename of struct netfs_read_*request to struct netfs_io_*request. Changes ======= ver #2) - Make the changes in the docs also. Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/164622992433.3564931.6684311087845150271.stgit@warthog.procyon.org.uk/ # v1 Link: https://lore.kernel.org/r/164678196111.1200972.5001114956865989528.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/164692892567.2099075.13895804222087028813.stgit@warthog.procyon.org.uk/ # v3 --- Documentation/filesystems/netfs_library.rst | 4 +- fs/9p/vfs_addr.c | 6 +-- fs/afs/file.c | 4 +- fs/cachefiles/io.c | 4 +- fs/ceph/addr.c | 6 +-- fs/netfs/read_helper.c | 83 +++++++++++++++-------------- include/linux/netfs.h | 22 ++++---- 7 files changed, 65 insertions(+), 64 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index a997e2d4321d..4eb7e7b7b0fc 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -250,7 +250,7 @@ through which it can issue requests and negotiate:: int (*begin_cache_operation)(struct netfs_io_request *rreq); void (*expand_readahead)(struct netfs_io_request *rreq); bool (*clamp_length)(struct netfs_io_subrequest *subreq); - void (*issue_op)(struct netfs_io_subrequest *subreq); + void (*issue_read)(struct netfs_io_subrequest *subreq); bool (*is_still_valid)(struct netfs_io_request *rreq); int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, struct folio *folio, void **_fsdata); @@ -305,7 +305,7 @@ The operations are as follows: This should return 0 on success and an error code on error. - * ``issue_op()`` + * ``issue_read()`` [Required] The helpers use this to dispatch a subrequest to the server for reading. 
In the subrequest, ->start, ->len and ->transferred indicate what diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 7b79fabe7593..fdc1033a1546 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -28,10 +28,10 @@ #include "fid.h" /** - * v9fs_req_issue_op - Issue a read from 9P + * v9fs_issue_read - Issue a read from 9P * @subreq: The read to make */ -static void v9fs_req_issue_op(struct netfs_io_subrequest *subreq) +static void v9fs_issue_read(struct netfs_io_subrequest *subreq) { struct netfs_io_request *rreq = subreq->rreq; struct p9_fid *fid = rreq->netfs_priv; @@ -106,7 +106,7 @@ static const struct netfs_request_ops v9fs_req_ops = { .init_request = v9fs_init_request, .is_cache_enabled = v9fs_is_cache_enabled, .begin_cache_operation = v9fs_begin_cache_operation, - .issue_op = v9fs_req_issue_op, + .issue_read = v9fs_issue_read, .cleanup = v9fs_req_cleanup, }; diff --git a/fs/afs/file.c b/fs/afs/file.c index e55761f8858c..b19d635eed12 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -310,7 +310,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req) return afs_do_sync_operation(op); } -static void afs_req_issue_op(struct netfs_io_subrequest *subreq) +static void afs_issue_read(struct netfs_io_subrequest *subreq) { struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode); struct afs_read *fsreq; @@ -401,7 +401,7 @@ const struct netfs_request_ops afs_req_ops = { .is_cache_enabled = afs_is_cache_enabled, .begin_cache_operation = afs_begin_cache_operation, .check_write_begin = afs_check_write_begin, - .issue_op = afs_req_issue_op, + .issue_read = afs_issue_read, .cleanup = afs_priv_cleanup, }; diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index 6ac6fdbc70d3..b19f496db9ad 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -406,7 +406,7 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * } if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) { - __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags); + __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); why = cachefiles_trace_read_no_data; goto out_no_object; } @@ -475,7 +475,7 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * goto out; download_and_store: - __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags); + __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); out: cachefiles_end_secure(cache, saved_cred); out_no_object: diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 9d995f351079..9189257476f8 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -259,7 +259,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq) size_t len; __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); - __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags); + __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); if (subreq->start >= inode->i_size) goto out; @@ -298,7 +298,7 @@ out: return true; } -static void ceph_netfs_issue_op(struct netfs_io_subrequest *subreq) +static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq) { struct netfs_io_request *rreq = subreq->rreq; struct inode *inode = rreq->inode; @@ -367,7 +367,7 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv) static const struct netfs_request_ops ceph_netfs_read_ops = { .is_cache_enabled = ceph_is_cache_enabled, .begin_cache_operation = ceph_begin_cache_operation, - .issue_op = ceph_netfs_issue_op, + .issue_read = ceph_netfs_issue_read, .expand_readahead = ceph_netfs_expand_readahead, .clamp_length = ceph_netfs_clamp_length, 
.check_write_begin = ceph_netfs_check_write_begin, diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 50035d93f1dc..26d54055b17e 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -37,7 +37,7 @@ static void netfs_put_subrequest(struct netfs_io_subrequest *subreq, __netfs_put_subrequest(subreq, was_async); } -static struct netfs_io_request *netfs_alloc_read_request( +static struct netfs_io_request *netfs_alloc_request( const struct netfs_request_ops *ops, void *netfs_priv, struct file *file) { @@ -63,13 +63,12 @@ static struct netfs_io_request *netfs_alloc_read_request( return rreq; } -static void netfs_get_read_request(struct netfs_io_request *rreq) +static void netfs_get_request(struct netfs_io_request *rreq) { refcount_inc(&rreq->usage); } -static void netfs_rreq_clear_subreqs(struct netfs_io_request *rreq, - bool was_async) +static void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async) { struct netfs_io_subrequest *subreq; @@ -81,11 +80,11 @@ static void netfs_rreq_clear_subreqs(struct netfs_io_request *rreq, } } -static void netfs_free_read_request(struct work_struct *work) +static void netfs_free_request(struct work_struct *work) { struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work); - netfs_rreq_clear_subreqs(rreq, false); + netfs_clear_subrequests(rreq, false); if (rreq->netfs_priv) rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv); trace_netfs_rreq(rreq, netfs_rreq_trace_free); @@ -95,15 +94,15 @@ static void netfs_free_read_request(struct work_struct *work) netfs_stat_d(&netfs_n_rh_rreq); } -static void netfs_put_read_request(struct netfs_io_request *rreq, bool was_async) +static void netfs_put_request(struct netfs_io_request *rreq, bool was_async) { if (refcount_dec_and_test(&rreq->usage)) { if (was_async) { - rreq->work.func = netfs_free_read_request; + rreq->work.func = netfs_free_request; if (!queue_work(system_unbound_wq, &rreq->work)) BUG(); } else { - netfs_free_read_request(&rreq->work); + netfs_free_request(&rreq->work); } } } @@ -121,14 +120,14 @@ static struct netfs_io_subrequest *netfs_alloc_subrequest( INIT_LIST_HEAD(&subreq->rreq_link); refcount_set(&subreq->usage, 2); subreq->rreq = rreq; - netfs_get_read_request(rreq); + netfs_get_request(rreq); netfs_stat(&netfs_n_rh_sreq); } return subreq; } -static void netfs_get_read_subrequest(struct netfs_io_subrequest *subreq) +static void netfs_get_subrequest(struct netfs_io_subrequest *subreq) { refcount_inc(&subreq->usage); } @@ -141,7 +140,7 @@ static void __netfs_put_subrequest(struct netfs_io_subrequest *subreq, trace_netfs_sreq(subreq, netfs_sreq_trace_free); kfree(subreq); netfs_stat_d(&netfs_n_rh_sreq); - netfs_put_read_request(rreq, was_async); + netfs_put_request(rreq, was_async); } /* @@ -216,7 +215,7 @@ static void netfs_read_from_server(struct netfs_io_request *rreq, struct netfs_io_subrequest *subreq) { netfs_stat(&netfs_n_rh_download); - rreq->netfs_ops->issue_op(subreq); + rreq->netfs_ops->issue_read(subreq); } /* @@ -225,8 +224,8 @@ static void netfs_read_from_server(struct netfs_io_request *rreq, static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async) { trace_netfs_rreq(rreq, netfs_rreq_trace_done); - netfs_rreq_clear_subreqs(rreq, was_async); - netfs_put_read_request(rreq, was_async); + netfs_clear_subrequests(rreq, was_async); + netfs_put_request(rreq, was_async); } /* @@ -306,7 +305,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) atomic_inc(&rreq->nr_copy_ops); 
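/*
 * A minimal sketch of the counting idiom used with nr_copy_ops here (and
 * with nr_outstanding in the read paths): take one extra count before
 * queueing so that asynchronous completions cannot drop the counter to
 * zero while the list is still being walked; whichever decrement reaches
 * zero inherits the final handoff.  queue_async_copy() and
 * handle_all_copies_done() below are hypothetical stand-ins, not
 * functions from this series.
 */
static void queue_async_copy(struct netfs_io_request *rreq);		/* hypothetical */
static void handle_all_copies_done(struct netfs_io_request *rreq);	/* hypothetical */

static void submit_copies_sketch(struct netfs_io_request *rreq, int nr)
{
	atomic_inc(&rreq->nr_copy_ops);		/* bias: hold off the 0 transition */
	while (nr--) {
		atomic_inc(&rreq->nr_copy_ops);	/* one count per queued op */
		queue_async_copy(rreq);		/* completion does dec_and_test() */
	}
	/* Drop the bias; if this hits zero, every op has already completed
	 * and the final handoff belongs to this thread. */
	if (atomic_dec_and_test(&rreq->nr_copy_ops))
		handle_all_copies_done(rreq);
}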
list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) { - if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) { + if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) { list_del_init(&subreq->rreq_link); netfs_put_subrequest(subreq, false); } @@ -336,7 +335,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) atomic_inc(&rreq->nr_copy_ops); netfs_stat(&netfs_n_rh_write); - netfs_get_read_subrequest(subreq); + netfs_get_subrequest(subreq); trace_netfs_sreq(subreq, netfs_sreq_trace_write); cres->ops->write(cres, subreq->start, &iter, netfs_rreq_copy_terminated, subreq); @@ -378,9 +377,9 @@ static void netfs_rreq_unlock(struct netfs_io_request *rreq) XA_STATE(xas, &rreq->mapping->i_pages, start_page); if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) { - __clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags); + __clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags); list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { - __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags); + __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); } } @@ -408,7 +407,7 @@ static void netfs_rreq_unlock(struct netfs_io_request *rreq) pg_failed = true; break; } - if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) + if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) folio_start_fscache(folio); pg_failed |= subreq_failed; if (pgend < iopos + subreq->len) @@ -453,13 +452,13 @@ static void netfs_rreq_unlock(struct netfs_io_request *rreq) static void netfs_rreq_short_read(struct netfs_io_request *rreq, struct netfs_io_subrequest *subreq) { - __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags); + __clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags); __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags); netfs_stat(&netfs_n_rh_short_read); trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short); - netfs_get_read_subrequest(subreq); + netfs_get_subrequest(subreq); atomic_inc(&rreq->nr_outstanding); if (subreq->source == NETFS_READ_FROM_CACHE) netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR); @@ -493,10 +492,10 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq) subreq->error = 0; netfs_stat(&netfs_n_rh_download_instead); trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead); - netfs_get_read_subrequest(subreq); + netfs_get_subrequest(subreq); atomic_inc(&rreq->nr_outstanding); netfs_read_from_server(rreq, subreq); - } else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) { + } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) { netfs_rreq_short_read(rreq, subreq); } } @@ -553,7 +552,7 @@ again: clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags); wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS); - if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags)) + if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) return netfs_rreq_write_to_cache(rreq); netfs_rreq_completed(rreq, was_async); @@ -642,8 +641,8 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq, complete: __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags); - if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) - set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags); + if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) + set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags); out: trace_netfs_sreq(subreq, netfs_sreq_trace_terminated); @@ -674,7 +673,7 @@ incomplete: __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags); } - __set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags); + __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags); 
set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags); goto out; @@ -878,7 +877,7 @@ void netfs_readahead(struct readahead_control *ractl, if (readahead_count(ractl) == 0) goto cleanup; - rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file); + rreq = netfs_alloc_request(ops, netfs_priv, ractl->file); if (!rreq) goto cleanup; rreq->mapping = ractl->mapping; @@ -916,7 +915,7 @@ void netfs_readahead(struct readahead_control *ractl, return; cleanup_free: - netfs_put_read_request(rreq, false); + netfs_put_request(rreq, false); return; cleanup: if (netfs_priv) @@ -953,7 +952,7 @@ int netfs_readpage(struct file *file, _enter("%lx", folio_index(folio)); - rreq = netfs_alloc_read_request(ops, netfs_priv, file); + rreq = netfs_alloc_request(ops, netfs_priv, file); if (!rreq) { if (netfs_priv) ops->cleanup(folio_file_mapping(folio), netfs_priv); @@ -975,7 +974,7 @@ int netfs_readpage(struct file *file, netfs_stat(&netfs_n_rh_readpage); trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage); - netfs_get_read_request(rreq); + netfs_get_request(rreq); atomic_set(&rreq->nr_outstanding, 1); do { @@ -989,7 +988,8 @@ int netfs_readpage(struct file *file, * process. */ do { - wait_var_event(&rreq->nr_outstanding, atomic_read(&rreq->nr_outstanding) == 1); + wait_var_event(&rreq->nr_outstanding, + atomic_read(&rreq->nr_outstanding) == 1); netfs_rreq_assess(rreq, false); } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)); @@ -999,7 +999,7 @@ int netfs_readpage(struct file *file, ret = -EIO; } out: - netfs_put_read_request(rreq, false); + netfs_put_request(rreq, false); return ret; } EXPORT_SYMBOL(netfs_readpage); @@ -1122,7 +1122,7 @@ retry: } ret = -ENOMEM; - rreq = netfs_alloc_read_request(ops, netfs_priv, file); + rreq = netfs_alloc_request(ops, netfs_priv, file); if (!rreq) goto error; rreq->mapping = folio_file_mapping(folio); @@ -1146,7 +1146,7 @@ retry: */ ractl._nr_pages = folio_nr_pages(folio); netfs_rreq_expand(rreq, &ractl); - netfs_get_read_request(rreq); + netfs_get_request(rreq); /* We hold the folio locks, so we can drop the references */ folio_get(folio); @@ -1160,12 +1160,13 @@ retry: } while (rreq->submitted < rreq->len); - /* Keep nr_outstanding incremented so that the ref always belongs to us, and - * the service code isn't punted off to a random thread pool to + /* Keep nr_outstanding incremented so that the ref always belongs to + * us, and the service code isn't punted off to a random thread pool to * process. */ for (;;) { - wait_var_event(&rreq->nr_outstanding, atomic_read(&rreq->nr_outstanding) == 1); + wait_var_event(&rreq->nr_outstanding, + atomic_read(&rreq->nr_outstanding) == 1); netfs_rreq_assess(rreq, false); if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) break; @@ -1177,7 +1178,7 @@ retry: trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin); ret = -EIO; } - netfs_put_read_request(rreq, false); + netfs_put_request(rreq, false); if (ret < 0) goto error; @@ -1193,7 +1194,7 @@ have_folio_no_wait: return 0; error_put: - netfs_put_read_request(rreq, false); + netfs_put_request(rreq, false); error: folio_unlock(folio); folio_put(folio); diff --git a/include/linux/netfs.h b/include/linux/netfs.h index a2ca91cb7a68..f63de27d6f29 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -131,7 +131,7 @@ struct netfs_cache_resources { * Descriptor for a single component subrequest. 
*/ struct netfs_io_subrequest { - struct netfs_io_request *rreq; /* Supervising read request */ + struct netfs_io_request *rreq; /* Supervising I/O request */ struct list_head rreq_link; /* Link in rreq->subrequests */ loff_t start; /* Where to start the I/O */ size_t len; /* Size of the I/O */ @@ -139,29 +139,29 @@ struct netfs_io_subrequest { refcount_t usage; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ - enum netfs_io_source source; /* Where to read from */ + enum netfs_io_source source; /* Where to read from/write to */ unsigned long flags; -#define NETFS_SREQ_WRITE_TO_CACHE 0 /* Set if should write to cache */ +#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ -#define NETFS_SREQ_SHORT_READ 2 /* Set if there was a short read from the cache */ +#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */ #define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */ #define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ }; /* - * Descriptor for a read helper request. This is used to make multiple I/O - * requests on a variety of sources and then stitch the result together. + * Descriptor for an I/O helper request. This is used to make multiple I/O + * operations to a variety of data stores and then stitch the result together. */ struct netfs_io_request { struct work_struct work; struct inode *inode; /* The file being accessed */ struct address_space *mapping; /* The mapping being accessed */ struct netfs_cache_resources cache_resources; - struct list_head subrequests; /* Requests to fetch I/O from disk or net */ + struct list_head subrequests; /* Contributory I/O operations */ void *netfs_priv; /* Private data for the netfs */ unsigned int debug_id; - atomic_t nr_outstanding; /* Number of read ops in progress */ - atomic_t nr_copy_ops; /* Number of write ops in progress */ + atomic_t nr_outstanding; /* Number of ops in progress */ + atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */ size_t submitted; /* Amount submitted for I/O so far */ size_t len; /* Length of the request */ short error; /* 0 or error that occurred */ @@ -171,7 +171,7 @@ struct netfs_io_request { refcount_t usage; unsigned long flags; #define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */ -#define NETFS_RREQ_WRITE_TO_CACHE 1 /* Need to write to the cache */ +#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */ #define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */ #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */ #define NETFS_RREQ_FAILED 4 /* The request failed */ @@ -188,7 +188,7 @@ struct netfs_request_ops { int (*begin_cache_operation)(struct netfs_io_request *rreq); void (*expand_readahead)(struct netfs_io_request *rreq); bool (*clamp_length)(struct netfs_io_subrequest *subreq); - void (*issue_op)(struct netfs_io_subrequest *subreq); + void (*issue_read)(struct netfs_io_subrequest *subreq); bool (*is_still_valid)(struct netfs_io_request *rreq); int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, struct folio *folio, void **_fsdata); -- cgit v1.2.3-71-gd317 From de74023befa1876f64bc5871a2a4a51850517118 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 17 Feb 2022 21:13:05 +0000 Subject: netfs: Trace refcounting on the netfs_io_request 
struct Add refcount tracing for the netfs_io_request structure. Changes ======= ver #3) - Switch 'W=' to 'R=' in the traceline to match other request debug IDs. Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/164622997668.3564931.14456171619219324968.stgit@warthog.procyon.org.uk/ # v1 Link: https://lore.kernel.org/r/164678200943.1200972.7241495532327787765.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/164692900920.2099075.11847712419940675791.stgit@warthog.procyon.org.uk/ # v3 --- fs/netfs/internal.h | 11 +++++++++-- fs/netfs/objects.c | 24 +++++++++++++++++------- fs/netfs/read_helper.c | 14 +++++++------- include/linux/netfs.h | 2 +- include/trace/events/netfs.h | 35 +++++++++++++++++++++++++++++++++++ 5 files changed, 69 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index cf7a3ddb16a4..89b02357500d 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -20,13 +20,20 @@ struct netfs_io_request *netfs_alloc_request(const struct netfs_request_ops *ops, void *netfs_priv, struct file *file); -void netfs_get_request(struct netfs_io_request *rreq); +void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what); void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async); -void netfs_put_request(struct netfs_io_request *rreq, bool was_async); +void netfs_put_request(struct netfs_io_request *rreq, bool was_async, + enum netfs_rreq_ref_trace what); struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq); void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async); void netfs_get_subrequest(struct netfs_io_subrequest *subreq); +static inline void netfs_see_request(struct netfs_io_request *rreq, + enum netfs_rreq_ref_trace what) +{ + trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what); +} + /* * read_helper.c */ diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index f7383c28dc6e..4e29c3bb6e5a 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -27,7 +27,7 @@ struct netfs_io_request *netfs_alloc_request( rreq->debug_id = atomic_inc_return(&debug_ids); INIT_LIST_HEAD(&rreq->subrequests); INIT_WORK(&rreq->work, netfs_rreq_work); - refcount_set(&rreq->usage, 1); + refcount_set(&rreq->ref, 1); __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); if (ops->init_request) ops->init_request(rreq, file); @@ -37,9 +37,12 @@ struct netfs_io_request *netfs_alloc_request( return rreq; } -void netfs_get_request(struct netfs_io_request *rreq) +void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what) { - refcount_inc(&rreq->usage); + int r; + + __refcount_inc(&rreq->ref, &r); + trace_netfs_rreq_ref(rreq->debug_id, r + 1, what); } void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async) @@ -68,9 +71,16 @@ static void netfs_free_request(struct work_struct *work) netfs_stat_d(&netfs_n_rh_rreq); } -void netfs_put_request(struct netfs_io_request *rreq, bool was_async) +void netfs_put_request(struct netfs_io_request *rreq, bool was_async, + enum netfs_rreq_ref_trace what) { - if (refcount_dec_and_test(&rreq->usage)) { + unsigned int debug_id = rreq->debug_id; + bool dead; + int r; + + dead = __refcount_dec_and_test(&rreq->ref, &r); + trace_netfs_rreq_ref(debug_id, r - 1, what); + if (dead) { if (was_async) { rreq->work.func = netfs_free_request; if (!queue_work(system_unbound_wq, &rreq->work)) @@ -93,7 
+103,7 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq INIT_LIST_HEAD(&subreq->rreq_link); refcount_set(&subreq->usage, 2); subreq->rreq = rreq; - netfs_get_request(rreq); + netfs_get_request(rreq, netfs_rreq_trace_get_subreq); netfs_stat(&netfs_n_rh_sreq); } @@ -113,7 +123,7 @@ static void __netfs_put_subrequest(struct netfs_io_subrequest *subreq, trace_netfs_sreq(subreq, netfs_sreq_trace_free); kfree(subreq); netfs_stat_d(&netfs_n_rh_sreq); - netfs_put_request(rreq, was_async); + netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq); } void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async) diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 181aeda32649..620c3be5ec0a 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -109,7 +109,7 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async) { trace_netfs_rreq(rreq, netfs_rreq_trace_done); netfs_clear_subrequests(rreq, was_async); - netfs_put_request(rreq, was_async); + netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete); } /* @@ -799,7 +799,7 @@ void netfs_readahead(struct readahead_control *ractl, return; cleanup_free: - netfs_put_request(rreq, false); + netfs_put_request(rreq, false, netfs_rreq_trace_put_failed); return; cleanup: if (netfs_priv) @@ -858,7 +858,7 @@ int netfs_readpage(struct file *file, netfs_stat(&netfs_n_rh_readpage); trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage); - netfs_get_request(rreq); + netfs_get_request(rreq, netfs_rreq_trace_get_hold); atomic_set(&rreq->nr_outstanding, 1); do { @@ -883,7 +883,7 @@ int netfs_readpage(struct file *file, ret = -EIO; } out: - netfs_put_request(rreq, false); + netfs_put_request(rreq, false, netfs_rreq_trace_put_hold); return ret; } EXPORT_SYMBOL(netfs_readpage); @@ -1030,13 +1030,13 @@ retry: */ ractl._nr_pages = folio_nr_pages(folio); netfs_rreq_expand(rreq, &ractl); - netfs_get_request(rreq); /* We hold the folio locks, so we can drop the references */ folio_get(folio); while (readahead_folio(&ractl)) ; + netfs_get_request(rreq, netfs_rreq_trace_get_hold); atomic_set(&rreq->nr_outstanding, 1); do { if (!netfs_rreq_submit_slice(rreq, &debug_index)) @@ -1062,7 +1062,7 @@ retry: trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin); ret = -EIO; } - netfs_put_request(rreq, false); + netfs_put_request(rreq, false, netfs_rreq_trace_put_hold); if (ret < 0) goto error; @@ -1078,7 +1078,7 @@ have_folio_no_wait: return 0; error_put: - netfs_put_request(rreq, false); + netfs_put_request(rreq, false, netfs_rreq_trace_put_failed); error: folio_unlock(folio); folio_put(folio); diff --git a/include/linux/netfs.h b/include/linux/netfs.h index f63de27d6f29..541aebe828f3 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -168,7 +168,7 @@ struct netfs_io_request { loff_t i_size; /* Size of the file */ loff_t start; /* Start position */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ - refcount_t usage; + refcount_t ref; unsigned long flags; #define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */ #define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */ diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 0c7a26c4d11c..e35a5ce52eb5 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -55,6 +55,15 @@ EM(netfs_fail_short_write_begin, "short-write-begin") \ E_(netfs_fail_prepare_write, "prep-write") 
+#define netfs_rreq_ref_traces \ + EM(netfs_rreq_trace_get_hold, "GET HOLD ") \ + EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \ + EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \ + EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \ + EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \ + EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \ + E_(netfs_rreq_trace_new, "NEW ") + #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY @@ -67,6 +76,7 @@ enum netfs_read_trace { netfs_read_traces } __mode(byte); enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte); enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte); enum netfs_failure { netfs_failures } __mode(byte); +enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte); #endif @@ -83,6 +93,7 @@ netfs_rreq_traces; netfs_sreq_sources; netfs_sreq_traces; netfs_failures; +netfs_rreq_ref_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -229,6 +240,30 @@ TRACE_EVENT(netfs_failure, __entry->error) ); +TRACE_EVENT(netfs_rreq_ref, + TP_PROTO(unsigned int rreq_debug_id, int ref, + enum netfs_rreq_ref_trace what), + + TP_ARGS(rreq_debug_id, ref, what), + + TP_STRUCT__entry( + __field(unsigned int, rreq ) + __field(int, ref ) + __field(enum netfs_rreq_ref_trace, what ) + ), + + TP_fast_assign( + __entry->rreq = rreq_debug_id; + __entry->ref = ref; + __entry->what = what; + ), + + TP_printk("R=%08x %s r=%u", + __entry->rreq, + __print_symbolic(__entry->what, netfs_rreq_ref_traces), + __entry->ref) + ); + #undef EM #undef E_ #endif /* _TRACE_NETFS_H */ -- cgit v1.2.3-71-gd317 From 6cd3d6fd1fe2feae5ff9f6a821081569bb140bf4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 17 Feb 2022 15:01:24 +0000 Subject: netfs: Trace refcounting on the netfs_io_subrequest struct Add refcount tracing for the netfs_io_subrequest structure. Changes ======= ver #3) - Switch 'W=' to 'R=' in the traceline to match other request debug IDs. 
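For illustration, the pattern this enables looks like the following (both calls are taken verbatim from the read_helper.c hunks below; the surrounding logic is elided): a caller pins a subrequest with a labelled get before reissuing it, and the termination path drops it with a matching labelled put, so every refcount transition is visible in the trace log:

	/* Sketch of a labelled get/put pair; surrounding code elided. */
	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
	atomic_inc(&rreq->nr_outstanding);
	netfs_read_from_server(rreq, subreq);

	/* ...later, on the I/O termination path: */
	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);

With the netfs_sreq_ref event enabled, each transition is emitted in the TP_printk() format added below, e.g. "R=0000004a[2] GET SHORTRD r=3" (the values here are invented for the example).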
Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/164622998584.3564931.5052255990645723639.stgit@warthog.procyon.org.uk/ # v1 Link: https://lore.kernel.org/r/164678202603.1200972.14726007419792315578.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/164692901860.2099075.4845820886851239935.stgit@warthog.procyon.org.uk/ # v3 --- fs/netfs/internal.h | 2 -- fs/netfs/objects.c | 32 +++++++++++++++++++++++--------- fs/netfs/read_helper.c | 20 +++++++++++--------- include/linux/netfs.h | 8 +++++++- include/trace/events/netfs.h | 40 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 81 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index 89b02357500d..a0b7d1bf9f3d 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -25,8 +25,6 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async); void netfs_put_request(struct netfs_io_request *rreq, bool was_async, enum netfs_rreq_ref_trace what); struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq); -void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async); -void netfs_get_subrequest(struct netfs_io_subrequest *subreq); static inline void netfs_see_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what) diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index 4e29c3bb6e5a..39097893e847 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -53,7 +53,8 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async) subreq = list_first_entry(&rreq->subrequests, struct netfs_io_subrequest, rreq_link); list_del(&subreq->rreq_link); - netfs_put_subrequest(subreq, was_async); + netfs_put_subrequest(subreq, was_async, + netfs_sreq_trace_put_clear); } } @@ -101,7 +102,7 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL); if (subreq) { INIT_LIST_HEAD(&subreq->rreq_link); - refcount_set(&subreq->usage, 2); + refcount_set(&subreq->ref, 2); subreq->rreq = rreq; netfs_get_request(rreq, netfs_rreq_trace_get_subreq); netfs_stat(&netfs_n_rh_sreq); @@ -110,13 +111,18 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq return subreq; } -void netfs_get_subrequest(struct netfs_io_subrequest *subreq) +void netfs_get_subrequest(struct netfs_io_subrequest *subreq, + enum netfs_sreq_ref_trace what) { - refcount_inc(&subreq->usage); + int r; + + __refcount_inc(&subreq->ref, &r); + trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1, + what); } -static void __netfs_put_subrequest(struct netfs_io_subrequest *subreq, - bool was_async) +static void netfs_free_subrequest(struct netfs_io_subrequest *subreq, + bool was_async) { struct netfs_io_request *rreq = subreq->rreq; @@ -126,8 +132,16 @@ static void __netfs_put_subrequest(struct netfs_io_subrequest *subreq, netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq); } -void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async) +void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async, + enum netfs_sreq_ref_trace what) { - if (refcount_dec_and_test(&subreq->usage)) - __netfs_put_subrequest(subreq, was_async); + unsigned int debug_index = subreq->debug_index; + unsigned int debug_id = subreq->rreq->debug_id; + bool dead; + int r; + + dead = 
__refcount_dec_and_test(&subreq->ref, &r); + trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what); + if (dead) + netfs_free_subrequest(subreq, was_async); } diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 620c3be5ec0a..8f277da487b6 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -167,7 +167,7 @@ static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, if (atomic_dec_and_test(&rreq->nr_copy_ops)) netfs_rreq_unmark_after_write(rreq, was_async); - netfs_put_subrequest(subreq, was_async); + netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated); } /* @@ -191,7 +191,8 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) { if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) { list_del_init(&subreq->rreq_link); - netfs_put_subrequest(subreq, false); + netfs_put_subrequest(subreq, false, + netfs_sreq_trace_put_no_copy); } } @@ -203,7 +204,8 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) break; subreq->len += next->len; list_del_init(&next->rreq_link); - netfs_put_subrequest(next, false); + netfs_put_subrequest(next, false, + netfs_sreq_trace_put_merged); } ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len, @@ -219,7 +221,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) atomic_inc(&rreq->nr_copy_ops); netfs_stat(&netfs_n_rh_write); - netfs_get_subrequest(subreq); + netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache); trace_netfs_sreq(subreq, netfs_sreq_trace_write); cres->ops->write(cres, subreq->start, &iter, netfs_rreq_copy_terminated, subreq); @@ -342,7 +344,7 @@ static void netfs_rreq_short_read(struct netfs_io_request *rreq, netfs_stat(&netfs_n_rh_short_read); trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short); - netfs_get_subrequest(subreq); + netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read); atomic_inc(&rreq->nr_outstanding); if (subreq->source == NETFS_READ_FROM_CACHE) netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR); @@ -376,7 +378,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq) subreq->error = 0; netfs_stat(&netfs_n_rh_download_instead); trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead); - netfs_get_subrequest(subreq); + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); atomic_inc(&rreq->nr_outstanding); netfs_read_from_server(rreq, subreq); } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) { @@ -538,7 +540,7 @@ out: else if (u == 1) wake_up_var(&rreq->nr_outstanding); - netfs_put_subrequest(subreq, was_async); + netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated); return; incomplete: @@ -683,7 +685,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq, subreq_failed: rreq->error = subreq->error; - netfs_put_subrequest(subreq, false); + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed); return false; } @@ -1030,13 +1032,13 @@ retry: */ ractl._nr_pages = folio_nr_pages(folio); netfs_rreq_expand(rreq, &ractl); + netfs_get_request(rreq, netfs_rreq_trace_get_hold); /* We hold the folio locks, so we can drop the references */ folio_get(folio); while (readahead_folio(&ractl)) ; - netfs_get_request(rreq, netfs_rreq_trace_get_hold); atomic_set(&rreq->nr_outstanding, 1); do { if (!netfs_rreq_submit_slice(rreq, &debug_index)) diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 
541aebe828f3..c702bd8ea8da 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -18,6 +18,8 @@ #include #include +enum netfs_sreq_ref_trace; + /* * Overload PG_private_2 to give us PG_fscache - this is used to indicate that * a page is currently backed by a local disk cache @@ -136,7 +138,7 @@ struct netfs_io_subrequest { loff_t start; /* Where to start the I/O */ size_t len; /* Size of the I/O */ size_t transferred; /* Amount of data transferred */ - refcount_t usage; + refcount_t ref; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ enum netfs_io_source source; /* Where to read from/write to */ @@ -268,6 +270,10 @@ extern int netfs_write_begin(struct file *, struct address_space *, void *); extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); +extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq, + enum netfs_sreq_ref_trace what); +extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq, + bool was_async, enum netfs_sreq_ref_trace what); extern void netfs_stats_show(struct seq_file *); #endif /* _LINUX_NETFS_H */ diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index e35a5ce52eb5..dcea5e888fd0 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -64,6 +64,17 @@ EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \ E_(netfs_rreq_trace_new, "NEW ") +#define netfs_sreq_ref_traces \ + EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \ + EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \ + EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \ + EM(netfs_sreq_trace_new, "NEW ") \ + EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \ + EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \ + EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \ + EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \ + E_(netfs_sreq_trace_put_terminated, "PUT TERM ") + #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY @@ -77,6 +88,7 @@ enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte); enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte); enum netfs_failure { netfs_failures } __mode(byte); enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte); +enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); #endif @@ -94,6 +106,7 @@ netfs_sreq_sources; netfs_sreq_traces; netfs_failures; netfs_rreq_ref_traces; +netfs_sreq_ref_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -264,6 +277,33 @@ TRACE_EVENT(netfs_rreq_ref, __entry->ref) ); +TRACE_EVENT(netfs_sreq_ref, + TP_PROTO(unsigned int rreq_debug_id, unsigned int subreq_debug_index, + int ref, enum netfs_sreq_ref_trace what), + + TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what), + + TP_STRUCT__entry( + __field(unsigned int, rreq ) + __field(unsigned int, subreq ) + __field(int, ref ) + __field(enum netfs_sreq_ref_trace, what ) + ), + + TP_fast_assign( + __entry->rreq = rreq_debug_id; + __entry->subreq = subreq_debug_index; + __entry->ref = ref; + __entry->what = what; + ), + + TP_printk("R=%08x[%x] %s r=%u", + __entry->rreq, + __entry->subreq, + __print_symbolic(__entry->what, netfs_sreq_ref_traces), + __entry->ref) + ); + #undef EM #undef E_ #endif /* _TRACE_NETFS_H */ -- cgit v1.2.3-71-gd317 From 663dfb65c3b3ea4b8e1944680352992d58f3aa22 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 26 Aug 2021 09:24:42 -0400 Subject: netfs: Refactor arguments for netfs_alloc_read_request Pass 
start and len to the rreq allocator. This should ensure that the fields are set so that ->init_request() can use them. Also add a parameter to indicate the origin of the request. Ceph can use this to tell whether to get caps. Changes ======= ver #3) - Change the author to me as Jeff feels that most of the patch is my changes now. ver #2) - Show the request origin in the netfs_rreq tracepoint. Signed-off-by: Jeff Layton Co-developed-by: David Howells Signed-off-by: David Howells cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/164622989020.3564931.17517006047854958747.stgit@warthog.procyon.org.uk/ # v1 Link: https://lore.kernel.org/r/164678208569.1200972.12153682697842916557.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/164692904155.2099075.14717645623034355995.stgit@warthog.procyon.org.uk/ # v3 --- fs/netfs/internal.h | 7 +++++-- fs/netfs/objects.c | 13 ++++++++++--- fs/netfs/read_helper.c | 23 +++++++++++------------ include/linux/netfs.h | 7 +++++++ include/trace/events/netfs.h | 11 ++++++++++- 5 files changed, 43 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index a0b7d1bf9f3d..89837e904fa7 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -17,9 +17,12 @@ /* * objects.c */ -struct netfs_io_request *netfs_alloc_request(const struct netfs_request_ops *ops, +struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, + struct file *file, + const struct netfs_request_ops *ops, void *netfs_priv, - struct file *file); + loff_t start, size_t len, + enum netfs_io_origin origin); void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what); void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async); void netfs_put_request(struct netfs_io_request *rreq, bool was_async, diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index 39097893e847..986d7a9d25dd 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -11,17 +11,24 @@ /* * Allocate an I/O request and initialise it.
*/ -struct netfs_io_request *netfs_alloc_request( - const struct netfs_request_ops *ops, void *netfs_priv, - struct file *file) +struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, + struct file *file, + const struct netfs_request_ops *ops, + void *netfs_priv, + loff_t start, size_t len, + enum netfs_io_origin origin) { static atomic_t debug_ids; struct netfs_io_request *rreq; rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL); if (rreq) { + rreq->start = start; + rreq->len = len; + rreq->origin = origin; rreq->netfs_ops = ops; rreq->netfs_priv = netfs_priv; + rreq->mapping = mapping; rreq->inode = file_inode(file); rreq->i_size = i_size_read(rreq->inode); rreq->debug_id = atomic_inc_return(&debug_ids); diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 8f277da487b6..dea085715286 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -763,12 +763,13 @@ void netfs_readahead(struct readahead_control *ractl, if (readahead_count(ractl) == 0) goto cleanup; - rreq = netfs_alloc_request(ops, netfs_priv, ractl->file); + rreq = netfs_alloc_request(ractl->mapping, ractl->file, + ops, netfs_priv, + readahead_pos(ractl), + readahead_length(ractl), + NETFS_READAHEAD); if (!rreq) goto cleanup; - rreq->mapping = ractl->mapping; - rreq->start = readahead_pos(ractl); - rreq->len = readahead_length(ractl); if (ops->begin_cache_operation) { ret = ops->begin_cache_operation(rreq); @@ -838,16 +839,15 @@ int netfs_readpage(struct file *file, _enter("%lx", folio_index(folio)); - rreq = netfs_alloc_request(ops, netfs_priv, file); + rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv, + folio_file_pos(folio), folio_size(folio), + NETFS_READPAGE); if (!rreq) { if (netfs_priv) ops->cleanup(folio_file_mapping(folio), netfs_priv); folio_unlock(folio); return -ENOMEM; } - rreq->mapping = folio_file_mapping(folio); - rreq->start = folio_file_pos(folio); - rreq->len = folio_size(folio); if (ops->begin_cache_operation) { ret = ops->begin_cache_operation(rreq); @@ -1008,12 +1008,11 @@ retry: } ret = -ENOMEM; - rreq = netfs_alloc_request(ops, netfs_priv, file); + rreq = netfs_alloc_request(mapping, file, ops, netfs_priv, + folio_file_pos(folio), folio_size(folio), + NETFS_READ_FOR_WRITE); if (!rreq) goto error; - rreq->mapping = folio_file_mapping(folio); - rreq->start = folio_file_pos(folio); - rreq->len = folio_size(folio); rreq->no_unlock_folio = folio_index(folio); __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); netfs_priv = NULL; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index c702bd8ea8da..7dc741d9b21b 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -150,6 +150,12 @@ struct netfs_io_subrequest { #define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ }; +enum netfs_io_origin { + NETFS_READAHEAD, /* This read was triggered by readahead */ + NETFS_READPAGE, /* This read is a synchronous read */ + NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ +} __mode(byte); + /* * Descriptor for an I/O helper request. This is used to make multiple I/O * operations to a variety of data stores and then stitch the result together. 
@@ -167,6 +173,7 @@ struct netfs_io_request { size_t submitted; /* Amount submitted for I/O so far */ size_t len; /* Length of the request */ short error; /* 0 or error that occurred */ + enum netfs_io_origin origin; /* Origin of the request */ loff_t i_size; /* Size of the file */ loff_t start; /* Start position */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 556859b0f107..f00e3e1821c8 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -21,6 +21,11 @@ EM(netfs_read_trace_readpage, "READPAGE ") \ E_(netfs_read_trace_write_begin, "WRITEBEGN") +#define netfs_rreq_origins \ + EM(NETFS_READAHEAD, "RA") \ + EM(NETFS_READPAGE, "RP") \ + E_(NETFS_READ_FOR_WRITE, "RW") + #define netfs_rreq_traces \ EM(netfs_rreq_trace_assess, "ASSESS ") \ EM(netfs_rreq_trace_copy, "COPY ") \ @@ -101,6 +106,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); #define E_(a, b) TRACE_DEFINE_ENUM(a); netfs_read_traces; +netfs_rreq_origins; netfs_rreq_traces; netfs_sreq_sources; netfs_sreq_traces; @@ -159,17 +165,20 @@ TRACE_EVENT(netfs_rreq, TP_STRUCT__entry( __field(unsigned int, rreq ) __field(unsigned int, flags ) + __field(enum netfs_io_origin, origin ) __field(enum netfs_rreq_trace, what ) ), TP_fast_assign( __entry->rreq = rreq->debug_id; __entry->flags = rreq->flags; + __entry->origin = rreq->origin; __entry->what = what; ), - TP_printk("R=%08x %s f=%02x", + TP_printk("R=%08x %s %s f=%02x", __entry->rreq, + __print_symbolic(__entry->origin, netfs_rreq_origins), __print_symbolic(__entry->what, netfs_rreq_traces), __entry->flags) ); -- cgit v1.2.3-71-gd317 From 2de160417315b8d64455fe03e9bb7d3308ac3281 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 20 Jan 2022 21:55:46 +0000 Subject: netfs: Change ->init_request() to return an error code Change the request initialisation function to return an error code so that the network filesystem can return a failure (ENOMEM, for example). This will also allow ceph to abort a ->readahead() op if the server refuses to give it a cap allowing local caching from within the netfslib framework (errors aren't passed back through ->readahead(), so returning, say, -ENOBUFS will cause the op to be aborted). 
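As a sketch of what this permits (the myfs_* names here are invented for illustration and are not part of the patch), a filesystem hook can now veto an operation outright by combining the error return with the request origin added by the previous patch:

	static int myfs_init_request(struct netfs_io_request *rreq,
				     struct file *file)
	{
		/* Hypothetical: refuse to populate the page cache from
		 * readahead when local caching isn't permitted; since
		 * ->readahead() swallows errors, the op is simply aborted.
		 */
		if (rreq->origin == NETFS_READAHEAD &&
		    !myfs_may_cache(file))		/* invented helper */
			return -ENOBUFS;

		rreq->netfs_priv = myfs_get_priv(file);	/* invented helper */
		return rreq->netfs_priv ? 0 : -ENOMEM;
	}

If the hook fails, netfs_alloc_request() frees the request and propagates the error as an ERR_PTR(), as the objects.c hunk below shows.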
Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/164678212401.1200972.16537041523832944934.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/164692905398.2099075.5238033621684646524.stgit@warthog.procyon.org.uk/ # v3 --- fs/9p/vfs_addr.c | 3 ++- fs/afs/file.c | 3 ++- fs/netfs/objects.c | 41 ++++++++++++++++++++++++----------------- fs/netfs/read_helper.c | 20 ++++++++++++-------- include/linux/netfs.h | 2 +- 5 files changed, 41 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index fdc1033a1546..91d3926c9559 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -56,12 +56,13 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq) * @rreq: The read request * @file: The file being read from */ -static void v9fs_init_request(struct netfs_io_request *rreq, struct file *file) +static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file) { struct p9_fid *fid = file->private_data; refcount_inc(&fid->count); rreq->netfs_priv = fid; + return 0; } /** diff --git a/fs/afs/file.c b/fs/afs/file.c index b19d635eed12..6469d7f98ef5 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -359,9 +359,10 @@ static int afs_symlink_readpage(struct file *file, struct page *page) return ret; } -static void afs_init_request(struct netfs_io_request *rreq, struct file *file) +static int afs_init_request(struct netfs_io_request *rreq, struct file *file) { rreq->netfs_priv = key_get(afs_file_key(file)); + return 0; } static bool afs_is_cache_enabled(struct inode *inode) diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index 986d7a9d25dd..ae18827e156b 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -20,27 +20,34 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, { static atomic_t debug_ids; struct netfs_io_request *rreq; + int ret; rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL); - if (rreq) { - rreq->start = start; - rreq->len = len; - rreq->origin = origin; - rreq->netfs_ops = ops; - rreq->netfs_priv = netfs_priv; - rreq->mapping = mapping; - rreq->inode = file_inode(file); - rreq->i_size = i_size_read(rreq->inode); - rreq->debug_id = atomic_inc_return(&debug_ids); - INIT_LIST_HEAD(&rreq->subrequests); - INIT_WORK(&rreq->work, netfs_rreq_work); - refcount_set(&rreq->ref, 1); - __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); - if (ops->init_request) - ops->init_request(rreq, file); - netfs_stat(&netfs_n_rh_rreq); + if (!rreq) + return ERR_PTR(-ENOMEM); + + rreq->start = start; + rreq->len = len; + rreq->origin = origin; + rreq->netfs_ops = ops; + rreq->netfs_priv = netfs_priv; + rreq->mapping = mapping; + rreq->inode = file_inode(file); + rreq->i_size = i_size_read(rreq->inode); + rreq->debug_id = atomic_inc_return(&debug_ids); + INIT_LIST_HEAD(&rreq->subrequests); + INIT_WORK(&rreq->work, netfs_rreq_work); + refcount_set(&rreq->ref, 1); + __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); + if (rreq->netfs_ops->init_request) { + ret = rreq->netfs_ops->init_request(rreq, file); + if (ret < 0) { + kfree(rreq); + return ERR_PTR(ret); + } } + netfs_stat(&netfs_n_rh_rreq); return rreq; } diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index dea085715286..b5176f4320f4 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -768,7 +768,7 @@ void netfs_readahead(struct readahead_control *ractl, readahead_pos(ractl), readahead_length(ractl), NETFS_READAHEAD); - if (!rreq) + if 
(IS_ERR(rreq)) goto cleanup; if (ops->begin_cache_operation) { @@ -842,11 +842,9 @@ int netfs_readpage(struct file *file, rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv, folio_file_pos(folio), folio_size(folio), NETFS_READPAGE); - if (!rreq) { - if (netfs_priv) - ops->cleanup(folio_file_mapping(folio), netfs_priv); - folio_unlock(folio); - return -ENOMEM; + if (IS_ERR(rreq)) { + ret = PTR_ERR(rreq); + goto alloc_error; } if (ops->begin_cache_operation) { @@ -887,6 +885,11 @@ int netfs_readpage(struct file *file, out: netfs_put_request(rreq, false, netfs_rreq_trace_put_hold); return ret; +alloc_error: + if (netfs_priv) + ops->cleanup(folio_file_mapping(folio), netfs_priv); + folio_unlock(folio); + return ret; } EXPORT_SYMBOL(netfs_readpage); @@ -1007,12 +1010,13 @@ retry: goto have_folio_no_wait; } - ret = -ENOMEM; rreq = netfs_alloc_request(mapping, file, ops, netfs_priv, folio_file_pos(folio), folio_size(folio), NETFS_READ_FOR_WRITE); - if (!rreq) + if (IS_ERR(rreq)) { + ret = PTR_ERR(rreq); goto error; + } rreq->no_unlock_folio = folio_index(folio); __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); netfs_priv = NULL; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 7dc741d9b21b..4b99e38f73d9 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -193,7 +193,7 @@ struct netfs_io_request { */ struct netfs_request_ops { bool (*is_cache_enabled)(struct inode *inode); - void (*init_request)(struct netfs_io_request *rreq, struct file *file); + int (*init_request)(struct netfs_io_request *rreq, struct file *file); int (*begin_cache_operation)(struct netfs_io_request *rreq); void (*expand_readahead)(struct netfs_io_request *rreq); bool (*clamp_length)(struct netfs_io_subrequest *subreq); -- cgit v1.2.3-71-gd317 From bc899ee1c898e520574ff4d99356eb2e724a9265 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 29 Jun 2021 22:37:05 +0100 Subject: netfs: Add a netfs inode context Add a netfs_i_context struct that should be included in the network filesystem's own inode struct wrapper, directly after the VFS's inode struct, e.g.: struct my_inode { struct { /* These must be contiguous */ struct inode vfs_inode; struct netfs_i_context netfs_ctx; }; }; The netfs_i_context struct so far contains a single field for the network filesystem to use - the cache cookie: struct netfs_i_context { ... struct fscache_cookie *cache; }; Three functions are provided to help with this: (1) void netfs_i_context_init(struct inode *inode, const struct netfs_request_ops *ops); Initialise the netfs context and set the operations. (2) struct netfs_i_context *netfs_i_context(struct inode *inode); Find the netfs context from the VFS inode. (3) struct inode *netfs_inode(struct netfs_i_context *ctx); Find the VFS inode from the netfs context. Changes ======= ver #4) - Fix netfs_is_cache_enabled() to check cookie->cache_priv to see if a cache is present[3]. - Fix netfs_skip_folio_read() to zero out all of the page, not just some of it[3]. ver #3) - Split out the bit to move ceph cap-getting on readahead into ceph_init_request()[1]. - Stick in a comment to the netfs inode structs indicating the contiguity requirements[2]. ver #2) - Adjust documentation to match. - Use "#if IS_ENABLED()" in netfs_i_cookie(), not "#ifdef". - Move the cap check from ceph_readahead() to ceph_init_request() to be called from netfslib. - Remove ceph_readahead() and use netfs_readahead() directly instead. 
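To make the conversion pattern concrete, here is a sketch for an imaginary "myfs" (all myfs_* identifiers are invented; the shape follows the ceph and 9p conversions in this patch). The inode allocator initialises the context once, after which the address_space operations can point straight at the netfs helpers:

	struct inode *myfs_alloc_inode(struct super_block *sb)
	{
		/* struct myfs_inode embeds vfs_inode directly followed by
		 * netfs_ctx, per the contiguity requirement above. */
		struct myfs_inode *mi;

		mi = kmem_cache_alloc(myfs_inode_cache, GFP_KERNEL);
		if (!mi)
			return NULL;
		netfs_i_context_init(&mi->vfs_inode, &myfs_req_ops);
		return &mi->vfs_inode;
	}

	const struct address_space_operations myfs_aops = {
		.readpage	= netfs_readpage,
		.readahead	= netfs_readahead,
		/* plus the filesystem's own write paths */
	};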
Signed-off-by: David Howells Acked-by: Jeff Layton cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/8af0d47f17d89c06bbf602496dd845f2b0bf25b3.camel@kernel.org/ [1] Link: https://lore.kernel.org/r/beaf4f6a6c2575ed489adb14b257253c868f9a5c.camel@kernel.org/ [2] Link: https://lore.kernel.org/r/3536452.1647421585@warthog.procyon.org.uk/ [3] Link: https://lore.kernel.org/r/164622984545.3564931.15691742939278418580.stgit@warthog.procyon.org.uk/ # v1 Link: https://lore.kernel.org/r/164678213320.1200972.16807551936267647470.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/164692909854.2099075.9535537286264248057.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/306388.1647595110@warthog.procyon.org.uk/ # v4 --- Documentation/filesystems/netfs_library.rst | 101 ++++++++++++++++++++-------- fs/9p/cache.c | 10 ++- fs/9p/v9fs.c | 4 +- fs/9p/v9fs.h | 13 ++-- fs/9p/vfs_addr.c | 43 ++---------- fs/9p/vfs_inode.c | 13 +++- fs/afs/dynroot.c | 1 + fs/afs/file.c | 26 +------ fs/afs/inode.c | 31 ++++++--- fs/afs/internal.h | 19 ++++-- fs/afs/super.c | 4 +- fs/afs/write.c | 3 +- fs/ceph/addr.c | 31 ++------- fs/ceph/cache.c | 28 ++++---- fs/ceph/cache.h | 11 +-- fs/ceph/inode.c | 6 +- fs/ceph/super.h | 17 +++-- fs/cifs/cifsglob.h | 10 +-- fs/cifs/fscache.c | 11 +-- fs/cifs/fscache.h | 2 +- fs/netfs/internal.h | 18 ++++- fs/netfs/objects.c | 12 ++-- fs/netfs/read_helper.c | 100 +++++++++++++-------------- fs/netfs/stats.c | 1 - include/linux/netfs.h | 81 +++++++++++++++++++--- 25 files changed, 318 insertions(+), 278 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index 4eb7e7b7b0fc..9c8bc5666b46 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -7,6 +7,8 @@ Network Filesystem Helper Library .. Contents: - Overview. + - Per-inode context. + - Inode context helper functions. - Buffered read helpers. - Read helper functions. - Read helper structures. @@ -28,6 +30,69 @@ Note that the library module doesn't link against local caching directly, so access must be provided by the netfs. +Per-Inode Context +================= + +The network filesystem helper library needs a place to store a bit of state for +its use on each netfs inode it is helping to manage. To this end, a context +structure is defined:: + + struct netfs_i_context { + const struct netfs_request_ops *ops; + struct fscache_cookie *cache; + }; + +A network filesystem that wants to use netfs lib must place one of these +directly after the VFS ``struct inode`` it allocates, usually as part of its +own struct. This can be done in a way similar to the following:: + + struct my_inode { + struct { + /* These must be contiguous */ + struct inode vfs_inode; + struct netfs_i_context netfs_ctx; + }; + ... + }; + +This allows netfslib to find its state by simple offset from the inode pointer, +thereby allowing the netfslib helper functions to be pointed to directly by the +VFS/VM operation tables. + +The structure contains the following fields: + + * ``ops`` + + The set of operations provided by the network filesystem to netfslib. + + * ``cache`` + + Local caching cookie, or NULL if no caching is enabled. This field does not + exist if fscache is disabled. + + +Inode Context Helper Functions +------------------------------ + +To help deal with the per-inode context, a number of helper functions are +provided.
Firstly, a function to perform basic initialisation on a context and +set the operations table pointer:: + + void netfs_i_context_init(struct inode *inode, + const struct netfs_request_ops *ops); + +then two functions to cast between the VFS inode structure and the netfs +context:: + + struct netfs_i_context *netfs_i_context(struct inode *inode); + struct inode *netfs_inode(struct netfs_i_context *ctx); + +and finally, a function to get the cache cookie pointer from the context +attached to an inode (or NULL if fscache is disabled):: + + struct fscache_cookie *netfs_i_cookie(struct inode *inode); + + Buffered Read Helpers ===================== @@ -70,38 +135,22 @@ Read Helper Functions Three read helpers are provided:: - void netfs_readahead(struct readahead_control *ractl, - const struct netfs_request_ops *ops, - void *netfs_priv); + void netfs_readahead(struct readahead_control *ractl); int netfs_readpage(struct file *file, - struct folio *folio, - const struct netfs_request_ops *ops, - void *netfs_priv); + struct page *page); int netfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned int len, unsigned int flags, struct folio **_folio, - void **_fsdata, - const struct netfs_request_ops *ops, - void *netfs_priv); - -Each corresponds to a VM operation, with the addition of a couple of parameters -for the use of the read helpers: + void **_fsdata); - * ``ops`` - - A table of operations through which the helpers can talk to the filesystem. - - * ``netfs_priv`` +Each corresponds to a VM address space operation. These operations use the +state in the per-inode context. - Filesystem private data (can be NULL). - -Both of these values will be stored into the read request structure. - -For ->readahead() and ->readpage(), the network filesystem should just jump -into the corresponding read helper; whereas for ->write_begin(), it may be a +For ->readahead() and ->readpage(), the network filesystem just points directly +at the corresponding read helper; whereas for ->write_begin(), it may be a little more complicated as the network filesystem might want to flush conflicting writes or track dirty data and needs to put the acquired folio if an error occurs after calling the helper. @@ -246,7 +295,6 @@ through which it can issue requests and negotiate:: struct netfs_request_ops { void (*init_request)(struct netfs_io_request *rreq, struct file *file); - bool (*is_cache_enabled)(struct inode *inode); int (*begin_cache_operation)(struct netfs_io_request *rreq); void (*expand_readahead)(struct netfs_io_request *rreq); bool (*clamp_length)(struct netfs_io_subrequest *subreq); @@ -265,11 +313,6 @@ The operations are as follows: [Optional] This is called to initialise the request structure. It is given the file for reference and can modify the ->netfs_priv value. - * ``is_cache_enabled()`` - - [Required] This is called by netfs_write_begin() to ask if the file is being - cached. It should return true if it is being cached and false otherwise.
- * ``begin_cache_operation()`` [Optional] This is called to ask the network filesystem to call into the diff --git a/fs/9p/cache.c b/fs/9p/cache.c index 55e108e5e133..1c8dc696d516 100644 --- a/fs/9p/cache.c +++ b/fs/9p/cache.c @@ -49,22 +49,20 @@ int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses, void v9fs_cache_inode_get_cookie(struct inode *inode) { - struct v9fs_inode *v9inode; + struct v9fs_inode *v9inode = V9FS_I(inode); struct v9fs_session_info *v9ses; __le32 version; __le64 path; if (!S_ISREG(inode->i_mode)) return; - - v9inode = V9FS_I(inode); - if (WARN_ON(v9inode->fscache)) + if (WARN_ON(v9fs_inode_cookie(v9inode))) return; version = cpu_to_le32(v9inode->qid.version); path = cpu_to_le64(v9inode->qid.path); v9ses = v9fs_inode2v9ses(inode); - v9inode->fscache = + v9inode->netfs_ctx.cache = fscache_acquire_cookie(v9fs_session_cache(v9ses), 0, &path, sizeof(path), @@ -72,5 +70,5 @@ void v9fs_cache_inode_get_cookie(struct inode *inode) i_size_read(&v9inode->vfs_inode)); p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n", - inode, v9inode->fscache); + inode, v9fs_inode_cookie(v9inode)); } diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 08f65c40af4f..e28ddf763b3b 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -623,9 +623,7 @@ static void v9fs_sysfs_cleanup(void) static void v9fs_inode_init_once(void *foo) { struct v9fs_inode *v9inode = (struct v9fs_inode *)foo; -#ifdef CONFIG_9P_FSCACHE - v9inode->fscache = NULL; -#endif + memset(&v9inode->qid, 0, sizeof(v9inode->qid)); inode_init_once(&v9inode->vfs_inode); } diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index bc8b30205d36..ec0e8df3b2eb 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h @@ -9,6 +9,7 @@ #define FS_9P_V9FS_H #include +#include /** * enum p9_session_flags - option flags for each 9P session @@ -108,14 +109,15 @@ struct v9fs_session_info { #define V9FS_INO_INVALID_ATTR 0x01 struct v9fs_inode { -#ifdef CONFIG_9P_FSCACHE - struct fscache_cookie *fscache; -#endif + struct { + /* These must be contiguous */ + struct inode vfs_inode; /* the VFS's inode record */ + struct netfs_i_context netfs_ctx; /* Netfslib context */ + }; struct p9_qid qid; unsigned int cache_validity; struct p9_fid *writeback_fid; struct mutex v_mutex; - struct inode vfs_inode; }; static inline struct v9fs_inode *V9FS_I(const struct inode *inode) @@ -126,7 +128,7 @@ static inline struct v9fs_inode *V9FS_I(const struct inode *inode) static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode) { #ifdef CONFIG_9P_FSCACHE - return v9inode->fscache; + return netfs_i_cookie(&v9inode->vfs_inode); #else return NULL; #endif @@ -163,6 +165,7 @@ extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses, extern const struct inode_operations v9fs_dir_inode_operations_dotl; extern const struct inode_operations v9fs_file_inode_operations_dotl; extern const struct inode_operations v9fs_symlink_inode_operations_dotl; +extern const struct netfs_request_ops v9fs_req_ops; extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb, int new); diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 91d3926c9559..ed06f3c34e98 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -77,17 +77,6 @@ static void v9fs_req_cleanup(struct address_space *mapping, void *priv) p9_client_clunk(fid); } -/** - * v9fs_is_cache_enabled - Determine if caching is enabled for an inode - * @inode: The inode to check - */ -static bool v9fs_is_cache_enabled(struct inode *inode) -{ - struct 
fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode)); - - return fscache_cookie_enabled(cookie) && cookie->cache_priv; -} - /** * v9fs_begin_cache_operation - Begin a cache operation for a read * @rreq: The read request @@ -103,36 +92,13 @@ static int v9fs_begin_cache_operation(struct netfs_io_request *rreq) #endif } -static const struct netfs_request_ops v9fs_req_ops = { +const struct netfs_request_ops v9fs_req_ops = { .init_request = v9fs_init_request, - .is_cache_enabled = v9fs_is_cache_enabled, .begin_cache_operation = v9fs_begin_cache_operation, .issue_read = v9fs_issue_read, .cleanup = v9fs_req_cleanup, }; -/** - * v9fs_vfs_readpage - read an entire page in from 9P - * @file: file being read - * @page: structure to page - * - */ -static int v9fs_vfs_readpage(struct file *file, struct page *page) -{ - struct folio *folio = page_folio(page); - - return netfs_readpage(file, folio, &v9fs_req_ops, NULL); -} - -/** - * v9fs_vfs_readahead - read a set of pages from 9P - * @ractl: The readahead parameters - */ -static void v9fs_vfs_readahead(struct readahead_control *ractl) -{ - netfs_readahead(ractl, &v9fs_req_ops, NULL); -} - /** * v9fs_release_page - release the private state associated with a page * @page: The page to be released @@ -326,8 +292,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping, * file. We need to do this before we get a lock on the page in case * there's more than one writer competing for the same cache block. */ - retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata, - &v9fs_req_ops, NULL); + retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata); if (retval < 0) return retval; @@ -388,8 +353,8 @@ static int v9fs_set_page_dirty(struct page *page) #endif const struct address_space_operations v9fs_addr_operations = { - .readpage = v9fs_vfs_readpage, - .readahead = v9fs_vfs_readahead, + .readpage = netfs_readpage, + .readahead = netfs_readahead, .set_page_dirty = v9fs_set_page_dirty, .writepage = v9fs_vfs_writepage, .write_begin = v9fs_write_begin, diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 2a10242c79c7..a7dc6781a622 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -231,9 +231,6 @@ struct inode *v9fs_alloc_inode(struct super_block *sb) v9inode = kmem_cache_alloc(v9fs_inode_cache, GFP_KERNEL); if (!v9inode) return NULL; -#ifdef CONFIG_9P_FSCACHE - v9inode->fscache = NULL; -#endif v9inode->writeback_fid = NULL; v9inode->cache_validity = 0; mutex_init(&v9inode->v_mutex); @@ -250,6 +247,14 @@ void v9fs_free_inode(struct inode *inode) kmem_cache_free(v9fs_inode_cache, V9FS_I(inode)); } +/* + * Set parameters for the netfs library + */ +static void v9fs_set_netfs_context(struct inode *inode) +{ + netfs_i_context_init(inode, &v9fs_req_ops); +} + int v9fs_init_inode(struct v9fs_session_info *v9ses, struct inode *inode, umode_t mode, dev_t rdev) { @@ -338,6 +343,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses, err = -EINVAL; goto error; } + + v9fs_set_netfs_context(inode); error: return err; diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index db832cc931c8..f120bcb8bf73 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -76,6 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root) /* there shouldn't be an existing inode */ BUG_ON(!(inode->i_state & I_NEW)); + netfs_i_context_init(inode, NULL); inode->i_size = 0; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; if (root) { diff --git a/fs/afs/file.c b/fs/afs/file.c index 6469d7f98ef5..2b68b2070248 
100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -19,13 +19,11 @@ #include "internal.h" static int afs_file_mmap(struct file *file, struct vm_area_struct *vma); -static int afs_readpage(struct file *file, struct page *page); static int afs_symlink_readpage(struct file *file, struct page *page); static void afs_invalidatepage(struct page *page, unsigned int offset, unsigned int length); static int afs_releasepage(struct page *page, gfp_t gfp_flags); -static void afs_readahead(struct readahead_control *ractl); static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter); static void afs_vm_open(struct vm_area_struct *area); static void afs_vm_close(struct vm_area_struct *area); @@ -52,8 +50,8 @@ const struct inode_operations afs_file_inode_operations = { }; const struct address_space_operations afs_file_aops = { - .readpage = afs_readpage, - .readahead = afs_readahead, + .readpage = netfs_readpage, + .readahead = netfs_readahead, .set_page_dirty = afs_set_page_dirty, .launder_page = afs_launder_page, .releasepage = afs_releasepage, @@ -365,13 +363,6 @@ static int afs_init_request(struct netfs_io_request *rreq, struct file *file) return 0; } -static bool afs_is_cache_enabled(struct inode *inode) -{ - struct fscache_cookie *cookie = afs_vnode_cache(AFS_FS_I(inode)); - - return fscache_cookie_enabled(cookie) && cookie->cache_priv; -} - static int afs_begin_cache_operation(struct netfs_io_request *rreq) { #ifdef CONFIG_AFS_FSCACHE @@ -399,25 +390,12 @@ static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv) const struct netfs_request_ops afs_req_ops = { .init_request = afs_init_request, - .is_cache_enabled = afs_is_cache_enabled, .begin_cache_operation = afs_begin_cache_operation, .check_write_begin = afs_check_write_begin, .issue_read = afs_issue_read, .cleanup = afs_priv_cleanup, }; -static int afs_readpage(struct file *file, struct page *page) -{ - struct folio *folio = page_folio(page); - - return netfs_readpage(file, folio, &afs_req_ops, NULL); -} - -static void afs_readahead(struct readahead_control *ractl) -{ - netfs_readahead(ractl, &afs_req_ops, NULL); -} - int afs_write_inode(struct inode *inode, struct writeback_control *wbc) { fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode))); diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 5964f8aee090..5b5e40197655 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -53,6 +53,14 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren dump_stack(); } +/* + * Set parameters for the netfs library + */ +static void afs_set_netfs_context(struct afs_vnode *vnode) +{ + netfs_i_context_init(&vnode->vfs_inode, &afs_req_ops); +} + /* * Initialise an inode from the vnode status. */ @@ -128,6 +136,7 @@ static int afs_inode_init_from_status(struct afs_operation *op, } afs_set_i_size(vnode, status->size); + afs_set_netfs_context(vnode); vnode->invalid_before = status->data_version; inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); @@ -420,7 +429,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) struct afs_vnode_cache_aux aux; if (vnode->status.type != AFS_FTYPE_FILE) { - vnode->cache = NULL; + vnode->netfs_ctx.cache = NULL; return; } @@ -430,12 +439,14 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) key.vnode_id_ext[1] = htonl(vnode->fid.vnode_hi); afs_set_cache_aux(vnode, &aux); - vnode->cache = fscache_acquire_cookie( - vnode->volume->cache, - vnode->status.type == AFS_FTYPE_FILE ? 
0 : FSCACHE_ADV_SINGLE_CHUNK, - &key, sizeof(key), - &aux, sizeof(aux), - vnode->status.size); + afs_vnode_set_cache(vnode, + fscache_acquire_cookie( + vnode->volume->cache, + vnode->status.type == AFS_FTYPE_FILE ? + 0 : FSCACHE_ADV_SINGLE_CHUNK, + &key, sizeof(key), + &aux, sizeof(aux), + vnode->status.size)); #endif } @@ -528,6 +539,7 @@ struct inode *afs_root_iget(struct super_block *sb, struct key *key) vnode = AFS_FS_I(inode); vnode->cb_v_break = as->volume->cb_v_break, + afs_set_netfs_context(vnode); op = afs_alloc_operation(key, as->volume); if (IS_ERR(op)) { @@ -786,11 +798,8 @@ void afs_evict_inode(struct inode *inode) afs_put_wb_key(wbk); } -#ifdef CONFIG_AFS_FSCACHE - fscache_relinquish_cookie(vnode->cache, + fscache_relinquish_cookie(afs_vnode_cache(vnode), test_bit(AFS_VNODE_DELETED, &vnode->flags)); - vnode->cache = NULL; -#endif afs_prune_wb_keys(vnode); afs_put_permits(rcu_access_pointer(vnode->permit_cache)); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index c56a0e1719ae..75ca3026457e 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -619,15 +619,16 @@ enum afs_lock_state { * leak from one inode to another. */ struct afs_vnode { - struct inode vfs_inode; /* the VFS's inode record */ + struct { + /* These must be contiguous */ + struct inode vfs_inode; /* the VFS's inode record */ + struct netfs_i_context netfs_ctx; /* Netfslib context */ + }; struct afs_volume *volume; /* volume on which vnode resides */ struct afs_fid fid; /* the file identifier for this inode */ struct afs_file_status status; /* AFS status info for this file */ afs_dataversion_t invalid_before; /* Child dentries are invalid before this */ -#ifdef CONFIG_AFS_FSCACHE - struct fscache_cookie *cache; /* caching cookie */ -#endif struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */ struct mutex io_lock; /* Lock for serialising I/O on this mutex */ struct rw_semaphore validate_lock; /* lock for validating this vnode */ @@ -674,12 +675,20 @@ struct afs_vnode { static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode) { #ifdef CONFIG_AFS_FSCACHE - return vnode->cache; + return netfs_i_cookie(&vnode->vfs_inode); #else return NULL; #endif } +static inline void afs_vnode_set_cache(struct afs_vnode *vnode, + struct fscache_cookie *cookie) +{ +#ifdef CONFIG_AFS_FSCACHE + vnode->netfs_ctx.cache = cookie; +#endif +} + /* * cached security record for one user's attempt to access a vnode */ diff --git a/fs/afs/super.c b/fs/afs/super.c index 5ec9fd97eccc..e66c6f54ac8e 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -688,13 +688,11 @@ static struct inode *afs_alloc_inode(struct super_block *sb) /* Reset anything that shouldn't leak from one inode to the next. */ memset(&vnode->fid, 0, sizeof(vnode->fid)); memset(&vnode->status, 0, sizeof(vnode->status)); + afs_vnode_set_cache(vnode, NULL); vnode->volume = NULL; vnode->lock_key = NULL; vnode->permit_cache = NULL; -#ifdef CONFIG_AFS_FSCACHE - vnode->cache = NULL; -#endif vnode->flags = 1 << AFS_VNODE_UNSET; vnode->lock_state = AFS_VNODE_LOCK_NONE; diff --git a/fs/afs/write.c b/fs/afs/write.c index 5e9157d0da29..e4b47f67a408 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -59,8 +59,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, * file. We need to do this before we get a lock on the page in case * there's more than one writer competing for the same cache block. 
*/ - ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata, - &afs_req_ops, NULL); + ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata); if (ret < 0) return ret; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 4aeccafa5dda..5512f448f609 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -403,7 +403,7 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv) ceph_put_cap_refs(ci, got); } -static const struct netfs_request_ops ceph_netfs_read_ops = { +const struct netfs_request_ops ceph_netfs_ops = { .init_request = ceph_init_request, .begin_cache_operation = ceph_begin_cache_operation, .issue_read = ceph_netfs_issue_read, @@ -413,28 +413,6 @@ static const struct netfs_request_ops ceph_netfs_read_ops = { .cleanup = ceph_readahead_cleanup, }; -/* read a single page, without unlocking it. */ -static int ceph_readpage(struct file *file, struct page *subpage) -{ - struct folio *folio = page_folio(subpage); - struct inode *inode = file_inode(file); - struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_vino vino = ceph_vino(inode); - size_t len = folio_size(folio); - u64 off = folio_file_pos(folio); - - dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n inline %d", - vino.ino, vino.snap, file, off, len, folio, folio_index(folio), - ci->i_inline_version != CEPH_INLINE_NONE); - - return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL); -} - -static void ceph_readahead(struct readahead_control *ractl) -{ - netfs_readahead(ractl, &ceph_netfs_read_ops, NULL); -} - #ifdef CONFIG_CEPH_FSCACHE static void ceph_set_page_fscache(struct page *page) { @@ -1333,8 +1311,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, struct folio *folio = NULL; int r; - r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL, - &ceph_netfs_read_ops, NULL); + r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL); if (r == 0) folio_wait_fscache(folio); if (r < 0) { @@ -1388,8 +1365,8 @@ out: } const struct address_space_operations ceph_aops = { - .readpage = ceph_readpage, - .readahead = ceph_readahead, + .readpage = netfs_readpage, + .readahead = netfs_readahead, .writepage = ceph_writepage, .writepages = ceph_writepages_start, .write_begin = ceph_write_begin, diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c index 7d22850623ef..ddea99922073 100644 --- a/fs/ceph/cache.c +++ b/fs/ceph/cache.c @@ -29,26 +29,25 @@ void ceph_fscache_register_inode_cookie(struct inode *inode) if (!(inode->i_state & I_NEW)) return; - WARN_ON_ONCE(ci->fscache); + WARN_ON_ONCE(ci->netfs_ctx.cache); - ci->fscache = fscache_acquire_cookie(fsc->fscache, 0, - &ci->i_vino, sizeof(ci->i_vino), - &ci->i_version, sizeof(ci->i_version), - i_size_read(inode)); + ci->netfs_ctx.cache = + fscache_acquire_cookie(fsc->fscache, 0, + &ci->i_vino, sizeof(ci->i_vino), + &ci->i_version, sizeof(ci->i_version), + i_size_read(inode)); } -void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) +void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci) { - struct fscache_cookie *cookie = ci->fscache; - - fscache_relinquish_cookie(cookie, false); + fscache_relinquish_cookie(ceph_fscache_cookie(ci), false); } void ceph_fscache_use_cookie(struct inode *inode, bool will_modify) { struct ceph_inode_info *ci = ceph_inode(inode); - fscache_use_cookie(ci->fscache, will_modify); + fscache_use_cookie(ceph_fscache_cookie(ci), will_modify); } void ceph_fscache_unuse_cookie(struct inode *inode, 
bool update) @@ -58,9 +57,10 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update) if (update) { loff_t i_size = i_size_read(inode); - fscache_unuse_cookie(ci->fscache, &ci->i_version, &i_size); + fscache_unuse_cookie(ceph_fscache_cookie(ci), + &ci->i_version, &i_size); } else { - fscache_unuse_cookie(ci->fscache, NULL, NULL); + fscache_unuse_cookie(ceph_fscache_cookie(ci), NULL, NULL); } } @@ -69,14 +69,14 @@ void ceph_fscache_update(struct inode *inode) struct ceph_inode_info *ci = ceph_inode(inode); loff_t i_size = i_size_read(inode); - fscache_update_cookie(ci->fscache, &ci->i_version, &i_size); + fscache_update_cookie(ceph_fscache_cookie(ci), &ci->i_version, &i_size); } void ceph_fscache_invalidate(struct inode *inode, bool dio_write) { struct ceph_inode_info *ci = ceph_inode(inode); - fscache_invalidate(ceph_inode(inode)->fscache, + fscache_invalidate(ceph_fscache_cookie(ci), &ci->i_version, i_size_read(inode), dio_write ? FSCACHE_INVAL_DIO_WRITE : 0); } diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h index b8b3b5cb6438..c20e43cade94 100644 --- a/fs/ceph/cache.h +++ b/fs/ceph/cache.h @@ -26,14 +26,9 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update); void ceph_fscache_update(struct inode *inode); void ceph_fscache_invalidate(struct inode *inode, bool dio_write); -static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) -{ - ci->fscache = NULL; -} - static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci) { - return ci->fscache; + return netfs_i_cookie(&ci->vfs_inode); } static inline void ceph_fscache_resize(struct inode *inode, loff_t to) @@ -91,10 +86,6 @@ static inline void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) { } -static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) -{ -} - static inline void ceph_fscache_register_inode_cookie(struct inode *inode) { } diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 7b1e93c8a0d2..6a176d9d394a 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -453,6 +453,9 @@ struct inode *ceph_alloc_inode(struct super_block *sb) dout("alloc_inode %p\n", &ci->vfs_inode); + /* Set parameters for the netfs library */ + netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops); + spin_lock_init(&ci->i_ceph_lock); ci->i_version = 0; @@ -538,9 +541,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb) INIT_WORK(&ci->i_work, ceph_inode_work); ci->i_work_mask = 0; memset(&ci->i_btime, '\0', sizeof(ci->i_btime)); - - ceph_fscache_inode_init(ci); - return &ci->vfs_inode; } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 0b4b519682f1..e1c65aa8d3b6 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -17,13 +17,11 @@ #include #include #include +#include +#include #include -#ifdef CONFIG_CEPH_FSCACHE -#include -#endif - /* large granularity for statfs utilization stats to facilitate * large volume sizes on 32-bit machines. */ #define CEPH_BLOCK_SHIFT 22 /* 4 MB */ @@ -317,6 +315,11 @@ struct ceph_inode_xattrs_info { * Ceph inode. 
*/ struct ceph_inode_info { + struct { + /* These must be contiguous */ + struct inode vfs_inode; + struct netfs_i_context netfs_ctx; /* Netfslib context */ + }; struct ceph_vino i_vino; /* ceph ino + snap */ spinlock_t i_ceph_lock; @@ -427,11 +430,6 @@ struct ceph_inode_info { struct work_struct i_work; unsigned long i_work_mask; - -#ifdef CONFIG_CEPH_FSCACHE - struct fscache_cookie *fscache; -#endif - struct inode vfs_inode; /* at end */ }; static inline struct ceph_inode_info * @@ -1215,6 +1213,7 @@ extern void __ceph_touch_fmode(struct ceph_inode_info *ci, /* addr.c */ extern const struct address_space_operations ceph_aops; +extern const struct netfs_request_ops ceph_netfs_ops; extern int ceph_mmap(struct file *file, struct vm_area_struct *vma); extern int ceph_uninline_data(struct file *file); extern int ceph_pool_perm_check(struct inode *inode, int need); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 48b343d03430..0a4085ced40f 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -16,6 +16,7 @@ #include #include #include +#include #include "cifs_fs_sb.h" #include "cifsacl.h" #include @@ -1402,6 +1403,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file); */ struct cifsInodeInfo { + struct { + /* These must be contiguous */ + struct inode vfs_inode; /* the VFS's inode record */ + struct netfs_i_context netfs_ctx; /* Netfslib context */ + }; bool can_cache_brlcks; struct list_head llist; /* locks held by this inode */ /* @@ -1432,10 +1438,6 @@ struct cifsInodeInfo { u64 uniqueid; /* server inode number */ u64 createtime; /* creation time on server */ __u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for this inode */ -#ifdef CONFIG_CIFS_FSCACHE - struct fscache_cookie *fscache; -#endif - struct inode vfs_inode; struct list_head deferred_closes; /* list of deferred closes */ spinlock_t deferred_lock; /* protection on deferred list */ bool lease_granted; /* Flag to indicate whether lease or oplock is granted.
*/ diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index b47c2011ce5b..a638b29e9062 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -103,7 +103,7 @@ void cifs_fscache_get_inode_cookie(struct inode *inode) cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd); - cifsi->fscache = + cifsi->netfs_ctx.cache = fscache_acquire_cookie(tcon->fscache, 0, &cifsi->uniqueid, sizeof(cifsi->uniqueid), &cd, sizeof(cd), @@ -126,11 +126,12 @@ void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) void cifs_fscache_release_inode_cookie(struct inode *inode) { struct cifsInodeInfo *cifsi = CIFS_I(inode); + struct fscache_cookie *cookie = cifs_inode_cookie(inode); - if (cifsi->fscache) { - cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache); - fscache_relinquish_cookie(cifsi->fscache, false); - cifsi->fscache = NULL; + if (cookie) { + cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie); + fscache_relinquish_cookie(cookie, false); + cifsi->netfs_ctx.cache = NULL; } } diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h index 55129908e2c1..52355c0912ae 100644 --- a/fs/cifs/fscache.h +++ b/fs/cifs/fscache.h @@ -61,7 +61,7 @@ void cifs_fscache_fill_coherency(struct inode *inode, static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { - return CIFS_I(inode)->fscache; + return netfs_i_cookie(inode); } static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index 89837e904fa7..54c761bcc8e6 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -6,6 +6,7 @@ */ #include +#include #include #ifdef pr_fmt @@ -19,8 +20,6 @@ */ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, struct file *file, - const struct netfs_request_ops *ops, - void *netfs_priv, loff_t start, size_t len, enum netfs_io_origin origin); void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what); @@ -81,6 +80,21 @@ static inline void netfs_stat_d(atomic_t *stat) #define netfs_stat_d(x) do {} while(0) #endif +/* + * Miscellaneous functions. + */ +static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx) +{ +#if IS_ENABLED(CONFIG_FSCACHE) + struct fscache_cookie *cookie = ctx->cache; + + return fscache_cookie_valid(cookie) && cookie->cache_priv && + fscache_cookie_enabled(cookie); +#else + return false; +#endif +} + /*****************************************************************************/ /* * debug tracing diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index ae18827e156b..657b19e60118 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -13,12 +13,12 @@ */ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, struct file *file, - const struct netfs_request_ops *ops, - void *netfs_priv, loff_t start, size_t len, enum netfs_io_origin origin) { static atomic_t debug_ids; + struct inode *inode = file ? 
file_inode(file) : mapping->host; + struct netfs_i_context *ctx = netfs_i_context(inode); struct netfs_io_request *rreq; int ret; @@ -29,11 +29,10 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, rreq->start = start; rreq->len = len; rreq->origin = origin; - rreq->netfs_ops = ops; - rreq->netfs_priv = netfs_priv; + rreq->netfs_ops = ctx->ops; rreq->mapping = mapping; - rreq->inode = file_inode(file); - rreq->i_size = i_size_read(rreq->inode); + rreq->inode = inode; + rreq->i_size = i_size_read(inode); rreq->debug_id = atomic_inc_return(&debug_ids); INIT_LIST_HEAD(&rreq->subrequests); INIT_WORK(&rreq->work, netfs_rreq_work); @@ -76,6 +75,7 @@ static void netfs_free_request(struct work_struct *work) { struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work); + netfs_clear_subrequests(rreq, false); if (rreq->netfs_priv) rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv); diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index b5176f4320f4..c048cd328ce5 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -14,7 +14,6 @@ #include #include #include -#include #include "internal.h" #define CREATE_TRACE_POINTS #include @@ -735,8 +734,6 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq, /** * netfs_readahead - Helper to manage a read request * @ractl: The description of the readahead request - * @ops: The network filesystem's operations for the helper to use - * @netfs_priv: Private netfs data to be retained in the request * * Fulfil a readahead request by drawing data from the cache if possible, or * the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O @@ -744,35 +741,32 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq, * readahead window can be expanded in either direction to a more convenient * alignment for RPC efficiency or to make storage in the cache feasible. * - * The calling netfs must provide a table of operations, only one of which, - * issue_op, is mandatory. It may also be passed a private token, which will - * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup(). + * The calling netfs must initialise a netfs context contiguous to the vfs + * inode before calling this. * * This is usable whether or not caching is enabled.
*/ -void netfs_readahead(struct readahead_control *ractl, - const struct netfs_request_ops *ops, - void *netfs_priv) +void netfs_readahead(struct readahead_control *ractl) { struct netfs_io_request *rreq; + struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host); unsigned int debug_index = 0; int ret; _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl)); if (readahead_count(ractl) == 0) - goto cleanup; + return; rreq = netfs_alloc_request(ractl->mapping, ractl->file, - ops, netfs_priv, readahead_pos(ractl), readahead_length(ractl), NETFS_READAHEAD); if (IS_ERR(rreq)) - goto cleanup; + return; - if (ops->begin_cache_operation) { - ret = ops->begin_cache_operation(rreq); + if (ctx->ops->begin_cache_operation) { + ret = ctx->ops->begin_cache_operation(rreq); if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) goto cleanup_free; } @@ -804,42 +798,35 @@ void netfs_readahead(struct readahead_control *ractl, cleanup_free: netfs_put_request(rreq, false, netfs_rreq_trace_put_failed); return; -cleanup: - if (netfs_priv) - ops->cleanup(ractl->mapping, netfs_priv); - return; } EXPORT_SYMBOL(netfs_readahead); /** * netfs_readpage - Helper to manage a readpage request * @file: The file to read from - * @folio: The folio to read - * @ops: The network filesystem's operations for the helper to use - * @netfs_priv: Private netfs data to be retained in the request + * @subpage: A subpage of the folio to read * * Fulfil a readpage request by drawing data from the cache if possible, or the * netfs if not. Space beyond the EOF is zero-filled. Multiple I/O requests * from different sources will get munged together. * - * The calling netfs must provide a table of operations, only one of which, - * issue_op, is mandatory. It may also be passed a private token, which will - * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup(). + * The calling netfs must initialise a netfs context contiguous to the vfs + * inode before calling this. * * This is usable whether or not caching is enabled. 
*/ -int netfs_readpage(struct file *file, - struct folio *folio, - const struct netfs_request_ops *ops, - void *netfs_priv) +int netfs_readpage(struct file *file, struct page *subpage) { + struct folio *folio = page_folio(subpage); + struct address_space *mapping = folio->mapping; struct netfs_io_request *rreq; + struct netfs_i_context *ctx = netfs_i_context(mapping->host); unsigned int debug_index = 0; int ret; _enter("%lx", folio_index(folio)); - rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv, + rreq = netfs_alloc_request(mapping, file, folio_file_pos(folio), folio_size(folio), NETFS_READPAGE); if (IS_ERR(rreq)) { @@ -847,8 +834,8 @@ int netfs_readpage(struct file *file, goto alloc_error; } - if (ops->begin_cache_operation) { - ret = ops->begin_cache_operation(rreq); + if (ctx->ops->begin_cache_operation) { + ret = ctx->ops->begin_cache_operation(rreq); if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) { folio_unlock(folio); goto out; @@ -886,8 +873,6 @@ out: netfs_put_request(rreq, false, netfs_rreq_trace_put_hold); return ret; alloc_error: - if (netfs_priv) - ops->cleanup(folio_file_mapping(folio), netfs_priv); folio_unlock(folio); return ret; } @@ -898,6 +883,7 @@ EXPORT_SYMBOL(netfs_readpage); * @folio: The folio being prepared * @pos: starting position for the write * @len: length of write + * @always_fill: T if the folio should always be completely filled/cleared * * In some cases, write_begin doesn't need to read at all: * - full folio write @@ -907,17 +893,27 @@ EXPORT_SYMBOL(netfs_readpage); * If any of these criteria are met, then zero out the unwritten parts * of the folio and return true. Otherwise, return false. */ -static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len) +static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len, + bool always_fill) { struct inode *inode = folio_inode(folio); loff_t i_size = i_size_read(inode); size_t offset = offset_in_folio(folio, pos); + size_t plen = folio_size(folio); + + if (unlikely(always_fill)) { + if (pos - offset + len <= i_size) + return false; /* Page entirely before EOF */ + zero_user_segment(&folio->page, 0, plen); + folio_mark_uptodate(folio); + return true; + } /* Full folio write */ - if (offset == 0 && len >= folio_size(folio)) + if (offset == 0 && len >= plen) return true; - /* pos beyond last folio in the file */ + /* Page entirely beyond the end of the file */ if (pos - offset >= i_size) goto zero_out; @@ -927,7 +923,7 @@ static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len) return false; zero_out: - zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio)); + zero_user_segments(&folio->page, 0, offset, offset + len, plen); return true; } @@ -940,8 +936,6 @@ zero_out: * @aop_flags: AOP_* flags * @_folio: Where to put the resultant folio * @_fsdata: Place for the netfs to store a cookie - * @ops: The network filesystem's operations for the helper to use - * @netfs_priv: Private netfs data to be retained in the request * * Pre-read data for a write-begin request by drawing data from the cache if * possible, or the netfs if not. Space beyond the EOF is zero-filled. @@ -960,17 +954,18 @@ zero_out: * should go ahead; unlock the folio and return -EAGAIN to cause the folio to * be regot; or return an error. * + * The calling netfs must initialise a netfs context contiguous to the vfs + * inode before calling this. + * * This is usable whether or not caching is enabled. 
*/ int netfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned int len, unsigned int aop_flags, - struct folio **_folio, void **_fsdata, - const struct netfs_request_ops *ops, - void *netfs_priv) + struct folio **_folio, void **_fsdata) { struct netfs_io_request *rreq; + struct netfs_i_context *ctx = netfs_i_context(file_inode(file )); struct folio *folio; - struct inode *inode = file_inode(file); unsigned int debug_index = 0, fgp_flags; pgoff_t index = pos >> PAGE_SHIFT; int ret; @@ -986,9 +981,9 @@ retry: if (!folio) return -ENOMEM; - if (ops->check_write_begin) { + if (ctx->ops->check_write_begin) { /* Allow the netfs (eg. ceph) to flush conflicts. */ - ret = ops->check_write_begin(file, pos, len, folio, _fsdata); + ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata); if (ret < 0) { trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin); if (ret == -EAGAIN) @@ -1004,13 +999,13 @@ retry: * within the cache granule containing the EOF, in which case we need * to preload the granule. */ - if (!ops->is_cache_enabled(inode) && - netfs_skip_folio_read(folio, pos, len)) { + if (!netfs_is_cache_enabled(ctx) && + netfs_skip_folio_read(folio, pos, len, false)) { netfs_stat(&netfs_n_rh_write_zskip); goto have_folio_no_wait; } - rreq = netfs_alloc_request(mapping, file, ops, netfs_priv, + rreq = netfs_alloc_request(mapping, file, folio_file_pos(folio), folio_size(folio), NETFS_READ_FOR_WRITE); if (IS_ERR(rreq)) { @@ -1019,10 +1014,9 @@ retry: } rreq->no_unlock_folio = folio_index(folio); __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); - netfs_priv = NULL; - if (ops->begin_cache_operation) { - ret = ops->begin_cache_operation(rreq); + if (ctx->ops->begin_cache_operation) { + ret = ctx->ops->begin_cache_operation(rreq); if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) goto error_put; } @@ -1076,8 +1070,6 @@ have_folio: if (ret < 0) goto error; have_folio_no_wait: - if (netfs_priv) - ops->cleanup(mapping, netfs_priv); *_folio = folio; _leave(" = 0"); return 0; @@ -1087,8 +1079,6 @@ error_put: error: folio_unlock(folio); folio_put(folio); - if (netfs_priv) - ops->cleanup(mapping, netfs_priv); _leave(" = %d", ret); return ret; } diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c index 9ae538c85378..5510a7a14a40 100644 --- a/fs/netfs/stats.c +++ b/fs/netfs/stats.c @@ -7,7 +7,6 @@ #include #include -#include #include "internal.h" atomic_t netfs_n_rh_readahead; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 4b99e38f73d9..8458b30172a5 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -118,6 +118,16 @@ enum netfs_io_source { typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error, bool was_async); +/* + * Per-inode description. This must be directly after the inode struct. + */ +struct netfs_i_context { + const struct netfs_request_ops *ops; +#if IS_ENABLED(CONFIG_FSCACHE) + struct fscache_cookie *cache; +#endif +}; + /* * Resources required to do operations on a cache. */ @@ -192,7 +202,6 @@ struct netfs_io_request { * Operations the network filesystem can/must provide to the helpers. 
*/ struct netfs_request_ops { - bool (*is_cache_enabled)(struct inode *inode); int (*init_request)(struct netfs_io_request *rreq, struct file *file); int (*begin_cache_operation)(struct netfs_io_request *rreq); void (*expand_readahead)(struct netfs_io_request *rreq); @@ -263,18 +272,11 @@ struct netfs_cache_ops { }; struct readahead_control; -extern void netfs_readahead(struct readahead_control *, - const struct netfs_request_ops *, - void *); -extern int netfs_readpage(struct file *, - struct folio *, - const struct netfs_request_ops *, - void *); +extern void netfs_readahead(struct readahead_control *); +extern int netfs_readpage(struct file *, struct page *); extern int netfs_write_begin(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct folio **, - void **, - const struct netfs_request_ops *, - void *); + void **); extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq, @@ -283,4 +285,61 @@ extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async, enum netfs_sreq_ref_trace what); extern void netfs_stats_show(struct seq_file *); +/** + * netfs_i_context - Get the netfs inode context from the inode + * @inode: The inode to query + * + * Get the netfs lib inode context from the network filesystem's inode. The + * context struct is expected to directly follow on from the VFS inode struct. + */ +static inline struct netfs_i_context *netfs_i_context(struct inode *inode) +{ + return (struct netfs_i_context *)(inode + 1); +} + +/** + * netfs_inode - Get the netfs inode from the inode context + * @ctx: The context to query + * + * Get the netfs inode from the netfs library's inode context. The VFS inode + * is expected to directly precede the context struct. + */ +static inline struct inode *netfs_inode(struct netfs_i_context *ctx) +{ + return ((struct inode *)ctx) - 1; +} + +/** + * netfs_i_context_init - Initialise a netfs lib context + * @inode: The inode with which the context is associated + * @ops: The netfs's operations list + * + * Initialise the netfs library context struct. This is expected to follow on + * directly from the VFS inode struct. + */ +static inline void netfs_i_context_init(struct inode *inode, + const struct netfs_request_ops *ops) +{ + struct netfs_i_context *ctx = netfs_i_context(inode); + + memset(ctx, 0, sizeof(*ctx)); + ctx->ops = ops; +} + +/** + * netfs_i_cookie - Get the cache cookie from the inode + * @inode: The inode to query + * + * Get the caching cookie (if enabled) from the network filesystem's inode. + */ +static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode) +{ +#if IS_ENABLED(CONFIG_FSCACHE) + struct netfs_i_context *ctx = netfs_i_context(inode); + return ctx->cache; +#else + return NULL; +#endif +} + #endif /* _LINUX_NETFS_H */ -- cgit v1.2.3-71-gd317 From 4058f742105ecfcbdf99e1139e6c1f74fb8e6db9 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 5 Nov 2021 13:55:38 +0000 Subject: netfs: Keep track of the actual remote file size Provide a place in which to keep track of the actual remote file size in the netfs context. This is needed because inode->i_size will be updated as we buffer writes in the pagecache, but the server file size won't get updated until we flush them back. 
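As an illustrative sketch only (not part of this patch; netfs_resize_file() is added by the diff below, and the caller shown here is hypothetical), a filesystem would keep the library's view of the server-side size in step with its own when it learns of a size change:

	/* Hypothetical caller: apply a new size to both the pagecache and netfs views. */
	static void example_fs_apply_new_size(struct inode *inode, loff_t new_size)
	{
		truncate_setsize(inode, new_size);	/* updates inode->i_size */
		netfs_resize_file(inode, new_size);	/* records the remote (server) size */
	}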
Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/164623013727.3564931.17659955636985232717.stgit@warthog.procyon.org.uk/ # v1 Link: https://lore.kernel.org/r/164678219305.1200972.6459431995188365134.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/164692921865.2099075.5310757978508056134.stgit@warthog.procyon.org.uk/ # v3 --- include/linux/netfs.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 8458b30172a5..c7bf1eaf51d5 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -126,6 +126,7 @@ struct netfs_i_context { #if IS_ENABLED(CONFIG_FSCACHE) struct fscache_cookie *cache; #endif + loff_t remote_i_size; /* Size of the remote file */ }; /* @@ -324,6 +325,21 @@ static inline void netfs_i_context_init(struct inode *inode, memset(ctx, 0, sizeof(*ctx)); ctx->ops = ops; + ctx->remote_i_size = i_size_read(inode); +} + +/** + * netfs_resize_file - Note that a file got resized + * @inode: The inode being resized + * @new_i_size: The new file size + * + * Inform the netfs lib that a file got resized so that it can adjust its state. + */ +static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size) +{ + struct netfs_i_context *ctx = netfs_i_context(inode); + + ctx->remote_i_size = new_i_size; } /** -- cgit v1.2.3-71-gd317 From f58c252e30cf74f68b0054293adc03b5923b9f0e Mon Sep 17 00:00:00 2001 From: Ilpo Järvinen Date: Mon, 14 Mar 2022 11:14:32 +0200 Subject: serial: 8250: fix XOFF/XON sending when DMA is used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When 8250 UART is using DMA, x_char (XON/XOFF) is never sent to the wire. After this change, x_char is injected correctly. Create uart_xchar_out() helper for sending the x_char out and accounting related to it. It seems that almost every driver does these same steps with x_char. Except for 8250, however, almost all currently lack .serial_out so they cannot immediately take advantage of this new helper. The downside of this patch is that it might reintroduce the problems some devices faced with mixed DMA/non-DMA transfer which caused revert f967fc8f165f (Revert "serial: 8250_dma: don't bother DMA with small transfers"). However, the impact should be limited to cases with XON/XOFF (that didn't work with DMA capable devices to begin with so this problem is not very likely to cause a major issue, if any at all). 
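For illustration, a UART driver whose port implements .serial_out would use the new helper in its transmit path roughly as follows (a sketch mirroring the 8250_port.c hunk below; the function name is hypothetical):

	static void example_tx_chars(struct uart_port *port)
	{
		if (port->x_char) {
			/* Writes x_char to the data register, bumps icount.tx
			 * and clears x_char, exactly once. */
			uart_xchar_out(port, UART_TX);
			return;
		}
		/* ... normal transmit FIFO fill would continue here ... */
	}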
Fixes: 9ee4b83e51f74 ("serial: 8250: Add support for dmaengine") Reported-by: Gilles Buloz Tested-by: Gilles Buloz Reviewed-by: Andy Shevchenko Signed-off-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20220314091432.4288-2-ilpo.jarvinen@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_dma.c | 11 ++++++++++- drivers/tty/serial/8250/8250_port.c | 4 +--- drivers/tty/serial/serial_core.c | 14 ++++++++++++++ include/linux/serial_core.h | 2 ++ 4 files changed, 27 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index 890fa7ddaa7f..b3c3f7e5851a 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -64,10 +64,19 @@ int serial8250_tx_dma(struct uart_8250_port *p) struct uart_8250_dma *dma = p->dma; struct circ_buf *xmit = &p->port.state->xmit; struct dma_async_tx_descriptor *desc; + struct uart_port *up = &p->port; int ret; - if (dma->tx_running) + if (dma->tx_running) { + if (up->x_char) { + dmaengine_pause(dma->txchan); + uart_xchar_out(up, UART_TX); + dmaengine_resume(dma->txchan); + } return 0; + } else if (up->x_char) { + uart_xchar_out(up, UART_TX); + } if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) { /* We have been called from __dma_tx_complete() */ diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 267026892264..318af6f13605 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1822,9 +1822,7 @@ void serial8250_tx_chars(struct uart_8250_port *up) int count; if (port->x_char) { - serial_out(up, UART_TX, port->x_char); - port->icount.tx++; - port->x_char = 0; + uart_xchar_out(port, UART_TX); return; } if (uart_tx_stopped(port)) { diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index a1688a341411..6a8963caf954 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -641,6 +641,20 @@ static void uart_flush_buffer(struct tty_struct *tty) tty_port_tty_wakeup(&state->port); } +/* + * This function performs low-level write of high-priority XON/XOFF + * character and accounting for it. + * + * Requires uart_port to implement .serial_out(). + */ +void uart_xchar_out(struct uart_port *uport, int offset) +{ + serial_port_out(uport, offset, uport->x_char); + uport->icount.tx++; + uport->x_char = 0; +} +EXPORT_SYMBOL_GPL(uart_xchar_out); + /* * This function is used to send a high-priority XON/XOFF character to * the device diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 14ae35f68abb..d4828e69087a 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -458,6 +458,8 @@ extern void uart_handle_cts_change(struct uart_port *uport, extern void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag); +void uart_xchar_out(struct uart_port *uport, int offset); + #ifdef CONFIG_MAGIC_SYSRQ_SERIAL #define SYSRQ_TIMEOUT (HZ * 5) -- cgit v1.2.3-71-gd317 From 336d4b814bf078fa698488632c19beca47308896 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 27 Jan 2022 12:15:32 -0600 Subject: ptrace: Move setting/clearing ptrace_message into ptrace_stop Today ptrace_message is easy to overlook as it is not a core part of ptrace_stop. It has been overlooked so much that there are places that set ptrace_message and don't clear it, and places that never set it.
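For example, ptrace_event() open-codes the pairing today, and the diff below folds the first step into the stop itself (both lines taken from the hunks that follow):

	/* Before: each caller stores the message by hand, then stops. */
	current->ptrace_message = message;
	ptrace_notify((event << 8) | SIGTRAP);

	/* After: ptrace_stop() sets it before the stop and clears it afterwards. */
	ptrace_notify((event << 8) | SIGTRAP, message);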
So if you get an unlucky sequence of events, the ptracer may be able to read a ptrace_message that does not apply to the current ptrace stop. Move setting of ptrace_message into ptrace_stop so that it always gets set before the stop, and always gets cleared after the stop. This prevents nonsense from being reported to userspace and makes ptrace_message more visible in the ptrace helper functions so that kernel developers can see it. Link: https://lkml.kernel.org/r/87bky67qfv.fsf_-_@email.froward.int.ebiederm.org Acked-by: Oleg Nesterov Reviewed-by: Kees Cook Signed-off-by: "Eric W. Biederman" --- include/linux/ptrace.h | 9 +++------ include/uapi/linux/ptrace.h | 2 +- kernel/signal.c | 21 ++++++++++++--------- 3 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 5310f43e4762..3e6b46e2b7be 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -60,7 +60,7 @@ extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned extern void ptrace_disable(struct task_struct *); extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); -extern void ptrace_notify(int exit_code); +extern void ptrace_notify(int exit_code, unsigned long message); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, const struct cred *ptracer_cred); @@ -155,8 +155,7 @@ static inline bool ptrace_event_enabled(struct task_struct *task, int event) static inline void ptrace_event(int event, unsigned long message) { if (unlikely(ptrace_event_enabled(current, event))) { - current->ptrace_message = message; - ptrace_notify((event << 8) | SIGTRAP); + ptrace_notify((event << 8) | SIGTRAP, message); } else if (event == PTRACE_EVENT_EXEC) { /* legacy EXEC report via SIGTRAP */ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED) @@ -424,8 +423,7 @@ static inline int ptrace_report_syscall(unsigned long message) if (!(ptrace & PT_PTRACED)) return 0; - current->ptrace_message = message; - ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); + ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0), message); /* * this isn't the same as continuing with a signal, but it will do @@ -437,7 +435,6 @@ static inline int ptrace_report_syscall(unsigned long message) current->exit_code = 0; } - current->ptrace_message = 0; return fatal_signal_pending(current); } diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index b7af92e07d1f..195ae64a8c87 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h @@ -114,7 +114,7 @@ struct ptrace_rseq_configuration { /* * These values are stored in task->ptrace_message - * by ptrace_report_syscall_* to describe the current syscall-stop. + * by ptrace_stop to describe the current syscall-stop. */ #define PTRACE_EVENTMSG_SYSCALL_ENTRY 1 #define PTRACE_EVENTMSG_SYSCALL_EXIT 2 diff --git a/kernel/signal.c b/kernel/signal.c index c2dee5420567..a49ac7149256 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2191,7 +2191,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, * If we actually decide not to stop at all because the tracer * is gone, we keep current->exit_code unless clear_code.
*/ -static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info) +static void ptrace_stop(int exit_code, int why, int clear_code, + unsigned long message, kernel_siginfo_t *info) __releases(&current->sighand->siglock) __acquires(&current->sighand->siglock) { @@ -2237,6 +2238,7 @@ */ smp_wmb(); + current->ptrace_message = message; current->last_siginfo = info; current->exit_code = exit_code; @@ -2315,6 +2317,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t */ spin_lock_irq(&current->sighand->siglock); current->last_siginfo = NULL; + current->ptrace_message = 0; /* LISTENING can be set only during STOP traps, clear it */ current->jobctl &= ~JOBCTL_LISTENING; @@ -2327,7 +2330,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t recalc_sigpending_tsk(current); } -static void ptrace_do_notify(int signr, int exit_code, int why) +static void ptrace_do_notify(int signr, int exit_code, int why, unsigned long message) { kernel_siginfo_t info; @@ -2338,17 +2341,17 @@ static void ptrace_do_notify(int signr, int exit_code, int why) info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); /* Let the debugger run. */ - ptrace_stop(exit_code, why, 1, &info); + ptrace_stop(exit_code, why, 1, message, &info); } -void ptrace_notify(int exit_code) +void ptrace_notify(int exit_code, unsigned long message) { BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); if (unlikely(task_work_pending(current))) task_work_run(); spin_lock_irq(&current->sighand->siglock); - ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); + ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message); spin_unlock_irq(&current->sighand->siglock); } @@ -2504,10 +2507,10 @@ static void do_jobctl_trap(void) signr = SIGTRAP; WARN_ON_ONCE(!signr); ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), - CLD_STOPPED); + CLD_STOPPED, 0); } else { WARN_ON_ONCE(!signr); - ptrace_stop(signr, CLD_STOPPED, 0, NULL); + ptrace_stop(signr, CLD_STOPPED, 0, 0, NULL); current->exit_code = 0; } } @@ -2561,7 +2564,7 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type) * comment in dequeue_signal(). */ current->jobctl |= JOBCTL_STOP_DEQUEUED; - ptrace_stop(signr, CLD_TRAPPED, 0, info); + ptrace_stop(signr, CLD_TRAPPED, 0, 0, info); /* We're back. Did the debugger cancel the sig? */ signr = current->exit_code; @@ -2891,7 +2894,7 @@ static void signal_delivered(struct ksignal *ksig, int stepping) if (current->sas_ss_flags & SS_AUTODISARM) sas_ss_reset(current); if (stepping) - ptrace_notify(SIGTRAP); + ptrace_notify(SIGTRAP, 0); } void signal_setup_done(int failed, struct ksignal *ksig, int stepping) -- cgit v1.2.3-71-gd317 From 6487d1dab837214ec2fd3f0ddd5f787e63be7c20 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 27 Jan 2022 12:19:13 -0600 Subject: ptrace: Return the signal to continue with from ptrace_stop The signal a task should continue with after a ptrace stop is inconsistently read, cleared, and sent. Solve this by reading and clearing the signal to be sent in ptrace_stop. In an ideal world everything except ptrace_signal would share a common implementation of continuing with the signal, so ptracers could count on the signal they ask to continue with actually being delivered. For now retain bug compatibility and just return with the signal number the ptracer requested the code continue with.
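A sketch of the resulting calling convention, as adopted by ptrace_report_syscall() in the diff below:

	/* The stop now reports the signal the ptracer asked to continue with. */
	signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0), message);
	if (signr)
		send_sig(signr, current, 1);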
Link: https://lkml.kernel.org/r/875yoe7qdp.fsf_-_@email.froward.int.ebiederm.org Reviewed-by: Kees Cook Signed-off-by: "Eric W. Biederman" --- include/linux/ptrace.h | 12 ++++++------ kernel/signal.c | 32 +++++++++++++++++++------------- 2 files changed, 25 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 3e6b46e2b7be..15b3d176b6b4 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -60,7 +60,7 @@ extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned extern void ptrace_disable(struct task_struct *); extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); -extern void ptrace_notify(int exit_code, unsigned long message); +extern int ptrace_notify(int exit_code, unsigned long message); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, const struct cred *ptracer_cred); @@ -419,21 +419,21 @@ extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oa static inline int ptrace_report_syscall(unsigned long message) { int ptrace = current->ptrace; + int signr; if (!(ptrace & PT_PTRACED)) return 0; - ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0), message); + signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0), + message); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. -brl */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + if (signr) + send_sig(signr, current, 1); return fatal_signal_pending(current); } diff --git a/kernel/signal.c b/kernel/signal.c index a49ac7149256..f1c4ab85833c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2188,15 +2188,17 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, * That makes it a way to test a stopped process for * being ptrace-stopped vs being job-control-stopped. * - * If we actually decide not to stop at all because the tracer - * is gone, we keep current->exit_code unless clear_code. + * Returns the signal the ptracer requested the code resume + * with. If the code did not stop because the tracer is gone, + * the stop signal remains unchanged unless clear_code. */ -static void ptrace_stop(int exit_code, int why, int clear_code, +static int ptrace_stop(int exit_code, int why, int clear_code, unsigned long message, kernel_siginfo_t *info) __releases(&current->sighand->siglock) __acquires(&current->sighand->siglock) { bool gstop_done = false; + bool read_code = true; if (arch_ptrace_stop_needed()) { /* @@ -2305,8 +2307,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, /* tasklist protects us from ptrace_freeze_traced() */ __set_current_state(TASK_RUNNING); + read_code = false; if (clear_code) - current->exit_code = 0; + exit_code = 0; read_unlock(&tasklist_lock); } @@ -2316,8 +2319,11 @@ static void ptrace_stop(int exit_code, int why, int clear_code, * any signal-sending on another CPU that wants to examine it.
*/ spin_lock_irq(&current->sighand->siglock); + if (read_code) + exit_code = current->exit_code; current->last_siginfo = NULL; current->ptrace_message = 0; + current->exit_code = 0; /* LISTENING can be set only during STOP traps, clear it */ current->jobctl &= ~JOBCTL_LISTENING; @@ -2328,9 +2334,10 @@ static void ptrace_stop(int exit_code, int why, int clear_code, * This sets TIF_SIGPENDING, but never clears it. */ recalc_sigpending_tsk(current); + return exit_code; } -static void ptrace_do_notify(int signr, int exit_code, int why, unsigned long message) +static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message) { kernel_siginfo_t info; @@ -2341,18 +2348,21 @@ static void ptrace_do_notify(int signr, int exit_code, int why, unsigned long me info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); /* Let the debugger run. */ - ptrace_stop(exit_code, why, 1, message, &info); + return ptrace_stop(exit_code, why, 1, message, &info); } -void ptrace_notify(int exit_code, unsigned long message) +int ptrace_notify(int exit_code, unsigned long message) { + int signr; + BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); if (unlikely(task_work_pending(current))) task_work_run(); spin_lock_irq(&current->sighand->siglock); - ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message); + signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message); spin_unlock_irq(&current->sighand->siglock); + return signr; } /** @@ -2511,7 +2521,6 @@ static void do_jobctl_trap(void) } else { WARN_ON_ONCE(!signr); ptrace_stop(signr, CLD_STOPPED, 0, 0, NULL); - current->exit_code = 0; } } @@ -2564,15 +2573,12 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type) * comment in dequeue_signal(). */ current->jobctl |= JOBCTL_STOP_DEQUEUED; - ptrace_stop(signr, CLD_TRAPPED, 0, 0, info); + signr = ptrace_stop(signr, CLD_TRAPPED, 0, 0, info); /* We're back. Did the debugger cancel the sig? */ - signr = current->exit_code; if (signr == 0) return signr; - current->exit_code = 0; - /* * Update the siginfo structure if the signal has * changed. If the debugger wanted something -- cgit v1.2.3-71-gd317 From 86fc59ef818beb0e1945d17f8e734898baba7e4e Mon Sep 17 00:00:00 2001 From: Colin Foster Date: Sun, 13 Mar 2022 15:45:23 -0700 Subject: regmap: add configurable downshift for addresses Add an additional reg_downshift to be applied to register addresses before any register accesses. An example of a device that uses this is a VSC7514 chip, which requires each register address to be downshifted by two if the access is performed over a SPI bus.
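For illustration, such a device might be described with a config like the following (a hedged sketch; the field values are made up, not taken from a real driver):

	static const struct regmap_config example_spi_regmap_config = {
		.reg_bits	= 24,
		.val_bits	= 32,
		/* Each register address is shifted right by two before the bus access. */
		.reg_downshift	= 2,
	};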
Signed-off-by: Colin Foster Link: https://lore.kernel.org/r/20220313224524.399947-2-colin.foster@in-advantage.com Signed-off-by: Mark Brown --- drivers/base/regmap/internal.h | 1 + drivers/base/regmap/regmap.c | 5 +++++ include/linux/regmap.h | 3 +++ 3 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index b1905916f7af..88f710e7ce31 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -31,6 +31,7 @@ struct regmap_format { size_t buf_size; size_t reg_bytes; size_t pad_bytes; + size_t reg_downshift; size_t val_bytes; void (*format_write)(struct regmap *map, unsigned int reg, unsigned int val); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 8f9fe5fd4707..1c7c6d6361af 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -823,6 +823,7 @@ struct regmap *__regmap_init(struct device *dev, map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); map->format.pad_bytes = config->pad_bits / 8; + map->format.reg_downshift = config->reg_downshift; map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); map->format.buf_size = DIV_ROUND_UP(config->reg_bits + config->val_bits + config->pad_bits, 8); @@ -1735,6 +1736,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, return ret; } + reg >>= map->format.reg_downshift; map->format.format_reg(map->work_buf, reg, map->reg_shift); regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, map->write_flag_mask); @@ -1905,6 +1907,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg, return ret; } + reg >>= map->format.reg_downshift; map->format.format_write(map, reg, val); trace_regmap_hw_write_start(map, reg, 1); @@ -2346,6 +2349,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, unsigned int reg = regs[i].reg; unsigned int val = regs[i].def; trace_regmap_hw_write_start(map, reg, 1); + reg >>= map->format.reg_downshift; map->format.format_reg(u8, reg, map->reg_shift); u8 += reg_bytes + pad_bytes; map->format.format_val(u8, val, 0); @@ -2673,6 +2677,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, return ret; } + reg >>= map->format.reg_downshift; map->format.format_reg(map->work_buf, reg, map->reg_shift); regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, map->read_flag_mask); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 22652e5fbc38..40fb9399add6 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -237,6 +237,8 @@ typedef void (*regmap_unlock)(void *); * @reg_stride: The register address stride. Valid register addresses are a * multiple of this value. If set to 0, a value of 1 will be * used. + * @reg_downshift: The number of bits to downshift the register before + * performing any operations. * @pad_bits: Number of bits of padding between register and value. * @val_bits: Number of bits in a register value, mandatory. * @@ -360,6 +362,7 @@ struct regmap_config { int reg_bits; int reg_stride; + int reg_downshift; int pad_bits; int val_bits; -- cgit v1.2.3-71-gd317 From 0074f3f2b1e43d3cedd97e47fb6980db6d2ba79e Mon Sep 17 00:00:00 2001 From: Colin Foster Date: Sun, 13 Mar 2022 15:45:24 -0700 Subject: regmap: allow a defined reg_base to be added to every address There's an inconsistency that arises when a register set can be accessed internally via MMIO, or externally via SPI. The VSC7514 chip allows both modes of operation. 
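Jumping ahead, a minimal sketch of the knob this patch adds (illustrative values only; the base address is hypothetical, and reg_downshift is the knob added by the previous patch):

	static const struct regmap_config example_external_access_config = {
		.reg_bits	= 24,
		.val_bits	= 32,
		.reg_base	= 0x71000000,	/* hypothetical offset of the register block */
		.reg_downshift	= 2,
	};

Per the diff below, the base is added to the register address before the downshift is applied.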
When internally accessed, the system utilizes __iomem, devm_ioremap_resource, and devm_regmap_init_mmio. For SPI it isn't possible to utilize memory-mapped IO. To properly operate, the resource base must be added to the register before every operation. Signed-off-by: Colin Foster Link: https://lore.kernel.org/r/20220313224524.399947-3-colin.foster@in-advantage.com Signed-off-by: Mark Brown --- drivers/base/regmap/internal.h | 1 + drivers/base/regmap/regmap.c | 6 ++++++ include/linux/regmap.h | 3 +++ 3 files changed, 10 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 88f710e7ce31..b4df36c7b17d 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -63,6 +63,7 @@ struct regmap { regmap_unlock unlock; void *lock_arg; /* This is passed to lock/unlock functions */ gfp_t alloc_flags; + unsigned int reg_base; struct device *dev; /* Device we do I/O on */ void *work_buf; /* Scratch buffer used to format I/O */ diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 1c7c6d6361af..5e12f7cb5147 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -821,6 +821,8 @@ struct regmap *__regmap_init(struct device *dev, else map->alloc_flags = GFP_KERNEL; + map->reg_base = config->reg_base; + map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); map->format.pad_bytes = config->pad_bits / 8; map->format.reg_downshift = config->reg_downshift; @@ -1736,6 +1738,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, return ret; } + reg += map->reg_base; reg >>= map->format.reg_downshift; map->format.format_reg(map->work_buf, reg, map->reg_shift); regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, @@ -1907,6 +1910,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg, return ret; } + reg += map->reg_base; reg >>= map->format.reg_downshift; map->format.format_write(map, reg, val); @@ -2349,6 +2353,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, unsigned int reg = regs[i].reg; unsigned int val = regs[i].def; trace_regmap_hw_write_start(map, reg, 1); + reg += map->reg_base; reg >>= map->format.reg_downshift; map->format.format_reg(u8, reg, map->reg_shift); u8 += reg_bytes + pad_bytes; @@ -2677,6 +2682,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, return ret; } + reg += map->reg_base; reg >>= map->format.reg_downshift; map->format.format_reg(map->work_buf, reg, map->reg_shift); regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 40fb9399add6..de81a94d7b30 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -239,6 +239,8 @@ typedef void (*regmap_unlock)(void *); * used. * @reg_downshift: The number of bits to downshift the register before * performing any operations. + * @reg_base: Value to be added to every register address before performing any + * operation. * @pad_bits: Number of bits of padding between register and value. * @val_bits: Number of bits in a register value, mandatory. 
* @@ -363,6 +365,7 @@ struct regmap_config { int reg_bits; int reg_stride; int reg_downshift; + unsigned int reg_base; int pad_bits; int val_bits; -- cgit v1.2.3-71-gd317 From 046e1537a3cf0adc68fe865b5dc9a7e731cc63b3 Mon Sep 17 00:00:00 2001 From: Íñigo Huguet Date: Tue, 15 Mar 2022 10:18:32 +0100 Subject: net: set default rss queues num to physical cores / 2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Network drivers can call to netif_get_num_default_rss_queues to get the default number of receive queues to use. Right now, this default number is min(8, num_online_cpus()). Instead, as suggested by Jakub, use the number of physical cores divided by 2 as a way to avoid wasting CPU resources and to avoid using both CPU threads, but still allowing to scale for high-end processors with many cores. As an exception, select 2 queues for processors with 2 cores, because otherwise it won't take any advantage of RSS despite being SMP capable. Tested: Processor Intel Xeon E5-2620 (2 sockets, 6 cores/socket, 2 threads/core). NIC Broadcom NetXtreme II BCM57810 (10GBps). Ran some tests with `perf stat iperf3 -R`, with parallelisms of 1, 8 and 24, getting the following results: - Number of queues: 6 (instead of 8) - Network throughput: not affected - CPU usage: utilized 0.05-0.12 CPUs more than before (having 24 CPUs this is only 0.2-0.5% higher) - Reduced the number of context switches by 7-50%, being more noticeable when using a higher number of parallel threads. Suggested-by: Jakub Kicinski Signed-off-by: Íñigo Huguet Link: https://lore.kernel.org/r/20220315091832.13873-1-ihuguet@redhat.com Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 1 - net/core/dev.c | 20 ++++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8cbe96ce0a2c..e01a8ce7181f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3664,7 +3664,6 @@ static inline unsigned int get_netdev_rx_queue_index( } #endif -#define DEFAULT_MAX_NUM_RSS_QUEUES (8) int netif_get_num_default_rss_queues(void); enum skb_free_reason { diff --git a/net/core/dev.c b/net/core/dev.c index 75bab5b0dbae..8e0cc5f2020d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2990,13 +2990,25 @@ EXPORT_SYMBOL(netif_set_real_num_queues); /** * netif_get_num_default_rss_queues - default number of RSS queues * - * This routine should set an upper limit on the number of RSS queues - * used by default by multiqueue devices. + * Default value is the number of physical cores if there are only 1 or 2, or + * divided by 2 if there are more. */ int netif_get_num_default_rss_queues(void) { - return is_kdump_kernel() ? - 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); + cpumask_var_t cpus; + int cpu, count = 0; + + if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL))) + return 1; + + cpumask_copy(cpus, cpu_online_mask); + for_each_cpu(cpu, cpus) { + ++count; + cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); + } + free_cpumask_var(cpus); + + return count > 2 ? 
DIV_ROUND_UP(count, 2) : count; } EXPORT_SYMBOL(netif_get_num_default_rss_queues); -- cgit v1.2.3-71-gd317 From 0c125f87a84097c182c481be7497af9f816e5db5 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sat, 26 Feb 2022 05:07:22 +0100 Subject: clk: fixed-factor: Introduce devm_clk_hw_register_fixed_factor_index() Add an API for a fixed factor clk that uses an index for the parent instead of a string name. This allows us to move drivers away from the string based method of describing parents and use the DT/firmware based method instead. Signed-off-by: Marek Vasut Cc: Michael Turquette Cc: Rob Herring Cc: Stephen Boyd Cc: devicetree@vger.kernel.org Link: https://lore.kernel.org/r/20220226040723.143705-2-marex@denx.de [sboyd@kernel.org: Expose a new API instead of internal function] Signed-off-by: Stephen Boyd --- drivers/clk/clk-fixed-factor.c | 22 ++++++++++++++++++++++ include/linux/clk-provider.h | 3 +++ 2 files changed, 25 insertions(+) (limited to 'include/linux') diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index 4e4b6d367612..54942d758ee6 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -131,6 +131,28 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np, return hw; } +/** + * devm_clk_hw_register_fixed_factor_index - Register a fixed factor clock with + * parent from DT index + * @dev: device that is registering this clock + * @name: name of this clock + * @index: index of phandle in @dev 'clocks' property + * @flags: fixed factor flags + * @mult: multiplier + * @div: divider + * + * Return: Pointer to fixed factor clk_hw structure that was registered or + * an error pointer. + */ +struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev, + const char *name, unsigned int index, unsigned long flags, + unsigned int mult, unsigned int div) +{ + return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, index, + flags, mult, div, true); +} +EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_index); + struct clk_hw *clk_hw_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div) diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 2faa6f7aa8a8..b7a7923f6bbb 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -1003,6 +1003,9 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw); struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); +struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev, + const char *name, unsigned int index, unsigned long flags, + unsigned int mult, unsigned int div); /** * struct clk_fractional_divider - adjustable fractional divider clock * -- cgit v1.2.3-71-gd317 From d714fb25e755ad96b699993fac47f48c4d6cebe9 Mon Sep 17 00:00:00 2001 From: Jae Hyun Yoo Date: Fri, 18 Mar 2022 13:41:33 -0700 Subject: i2c: add tracepoints for I2C slave events I2C slave events tracepoints can be enabled by: echo 1 > /sys/kernel/tracing/events/i2c_slave/enable and logs in /sys/kernel/tracing/trace will look like: ... i2c_slave: i2c-0 a=010 ret=0 WR_REQ [] ... i2c_slave: i2c-0 a=010 ret=0 WR_RCV [02] ... i2c_slave: i2c-0 a=010 ret=0 WR_RCV [0c] ... i2c_slave: i2c-0 a=010 ret=0 STOP [] ... i2c_slave: i2c-0 a=010 ret=0 RD_REQ [04] ... i2c_slave: i2c-0 a=010 ret=0 RD_PRO [b4] ... 
i2c_slave: i2c-0 a=010 ret=0 STOP [] formatted as: i2c-<adapter_nr> a=<addr> ret=<ret> <event> [<buf>], where <ret> is the callback return value. Trace output can be filtered by adding a filter like: echo adapter_nr==1 >/sys/kernel/tracing/events/i2c_slave/filter Signed-off-by: Jae Hyun Yoo Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-slave.c | 15 +++++++++ include/linux/i2c.h | 8 ++--- include/trace/events/i2c_slave.h | 67 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 84 insertions(+), 6 deletions(-) create mode 100644 include/trace/events/i2c_slave.h (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index 1589179d5eb9..e3765e12f93b 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -14,6 +14,9 @@ #include "i2c-core.h" +#define CREATE_TRACE_POINTS +#include + int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; @@ -79,6 +82,18 @@ int i2c_slave_unregister(struct i2c_client *client) } EXPORT_SYMBOL_GPL(i2c_slave_unregister); +int i2c_slave_event(struct i2c_client *client, + enum i2c_slave_event event, u8 *val) +{ + int ret = client->slave_cb(client, event, val); + + if (trace_i2c_slave_enabled()) + trace_i2c_slave(client, event, val, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(i2c_slave_event); + /** * i2c_detect_slave_mode - detect operation mode * @dev: The device owning the bus diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 7d4f52ceb7b5..fbda5ada2afc 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -392,12 +392,8 @@ enum i2c_slave_event { int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); int i2c_slave_unregister(struct i2c_client *client); bool i2c_detect_slave_mode(struct device *dev); - -static inline int i2c_slave_event(struct i2c_client *client, - enum i2c_slave_event event, u8 *val) -{ - return client->slave_cb(client, event, val); -} +int i2c_slave_event(struct i2c_client *client, + enum i2c_slave_event event, u8 *val); #else static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } #endif diff --git a/include/trace/events/i2c_slave.h b/include/trace/events/i2c_slave.h new file mode 100644 index 000000000000..811166abbe3a --- /dev/null +++ b/include/trace/events/i2c_slave.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * I2C slave tracepoints + * + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i2c_slave + +#if !defined(_TRACE_I2C_SLAVE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_I2C_SLAVE_H + +#include +#include + +TRACE_DEFINE_ENUM(I2C_SLAVE_READ_REQUESTED); +TRACE_DEFINE_ENUM(I2C_SLAVE_WRITE_REQUESTED); +TRACE_DEFINE_ENUM(I2C_SLAVE_READ_PROCESSED); +TRACE_DEFINE_ENUM(I2C_SLAVE_WRITE_RECEIVED); +TRACE_DEFINE_ENUM(I2C_SLAVE_STOP); + +#define show_event_type(type) \ + __print_symbolic(type, \ + { I2C_SLAVE_READ_REQUESTED, "RD_REQ" }, \ + { I2C_SLAVE_WRITE_REQUESTED, "WR_REQ" }, \ + { I2C_SLAVE_READ_PROCESSED, "RD_PRO" }, \ + { I2C_SLAVE_WRITE_RECEIVED, "WR_RCV" }, \ + { I2C_SLAVE_STOP, " STOP" }) + +TRACE_EVENT(i2c_slave, + TP_PROTO(const struct i2c_client *client, enum i2c_slave_event event, + __u8 *val, int cb_ret), + TP_ARGS(client, event, val, cb_ret), + TP_STRUCT__entry( + __field(int, adapter_nr ) + __field(int, ret ) + __field(__u16, addr ) + __field(__u16, len ) + __field(enum i2c_slave_event, event ) + __array(__u8, buf, 1) ), + + TP_fast_assign( + __entry->adapter_nr = client->adapter->nr; + __entry->addr = client->addr; + __entry->event = event; + __entry->ret = cb_ret; + switch (event) { + case I2C_SLAVE_READ_REQUESTED: + case I2C_SLAVE_READ_PROCESSED: + case I2C_SLAVE_WRITE_RECEIVED: + __entry->len = 1; + memcpy(__entry->buf, val, __entry->len); + break; + default: + __entry->len = 0; + break; + } + ), + TP_printk("i2c-%d a=%03x ret=%d %s [%*phD]", + __entry->adapter_nr, __entry->addr, __entry->ret, + show_event_type(__entry->event), __entry->len, __entry->buf + )); + +#endif /* _TRACE_I2C_SLAVE_H */ + +/* This part must be outside protection */ +#include -- cgit v1.2.3-71-gd317 From b00fa38a9c1cba044a32a601b49a55a18ed719d1 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 17 Mar 2022 21:55:52 -0700 Subject: bpf: Enable non-atomic allocations in local storage Currently, local storage memory can only be allocated atomically (GFP_ATOMIC). This restriction is too strict for sleepable bpf programs. In this patch, the verifier detects whether the program is sleepable, and passes the corresponding GFP_KERNEL or GFP_ATOMIC flag as a 5th argument to bpf_task/sk/inode_storage_get. This flag will propagate down to the local storage functions that allocate memory. Please note that bpf_task/sk/inode_storage_update_elem functions are invoked by userspace applications through syscalls. Preemption is disabled before bpf_task/sk/inode_storage_update_elem is called, which means they will always have to allocate memory atomically. 
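For illustration, a sleepable program needs no source-level change to benefit; the verifier supplies the flag on its own (a sketch; the map, struct and program names are hypothetical, assuming an example_task_map of type BPF_MAP_TYPE_TASK_STORAGE):

	SEC("lsm.s/file_open")	/* sleepable hook, so the verifier passes GFP_KERNEL */
	int BPF_PROG(example_prog, struct file *file)
	{
		struct example_value *v;

		/* The verifier appends GFP_KERNEL (or GFP_ATOMIC for non-sleepable
		 * programs) as a hidden fifth argument to this helper call. */
		v = bpf_task_storage_get(&example_task_map, bpf_get_current_task_btf(),
					 NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
		return 0;
	}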
Signed-off-by: Joanne Koong Signed-off-by: Alexei Starovoitov Acked-by: KP Singh Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220318045553.3091807-2-joannekoong@fb.com --- include/linux/bpf_local_storage.h | 7 +++-- kernel/bpf/bpf_inode_storage.c | 9 +++--- kernel/bpf/bpf_local_storage.c | 58 +++++++++++++++++++++++++-------------- kernel/bpf/bpf_task_storage.c | 10 ++++--- kernel/bpf/verifier.c | 20 ++++++++++++++ net/core/bpf_sk_storage.c | 21 ++++++++------ 6 files changed, 84 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index 37b3906af8b1..493e63258497 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -154,16 +154,17 @@ void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem); struct bpf_local_storage_elem * bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value, - bool charge_mem); + bool charge_mem, gfp_t gfp_flags); int bpf_local_storage_alloc(void *owner, struct bpf_local_storage_map *smap, - struct bpf_local_storage_elem *first_selem); + struct bpf_local_storage_elem *first_selem, + gfp_t gfp_flags); struct bpf_local_storage_data * bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, - void *value, u64 map_flags); + void *value, u64 map_flags, gfp_t gfp_flags); void bpf_local_storage_free_rcu(struct rcu_head *rcu); diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c index e29d9e3d853e..96be8d518885 100644 --- a/kernel/bpf/bpf_inode_storage.c +++ b/kernel/bpf/bpf_inode_storage.c @@ -136,7 +136,7 @@ static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key, sdata = bpf_local_storage_update(f->f_inode, (struct bpf_local_storage_map *)map, - value, map_flags); + value, map_flags, GFP_ATOMIC); fput(f); return PTR_ERR_OR_ZERO(sdata); } @@ -169,8 +169,9 @@ static int bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key) return err; } -BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode, - void *, value, u64, flags) +/* *gfp_flags* is a hidden argument provided by the verifier */ +BPF_CALL_5(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode, + void *, value, u64, flags, gfp_t, gfp_flags) { struct bpf_local_storage_data *sdata; @@ -196,7 +197,7 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode, if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) { sdata = bpf_local_storage_update( inode, (struct bpf_local_storage_map *)map, value, - BPF_NOEXIST); + BPF_NOEXIST, gfp_flags); return IS_ERR(sdata) ? 
(unsigned long)NULL : (unsigned long)sdata->data; } diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c index 092a1ac772d7..01aa2b51ec4d 100644 --- a/kernel/bpf/bpf_local_storage.c +++ b/kernel/bpf/bpf_local_storage.c @@ -63,7 +63,7 @@ static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem) struct bpf_local_storage_elem * bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, - void *value, bool charge_mem) + void *value, bool charge_mem, gfp_t gfp_flags) { struct bpf_local_storage_elem *selem; @@ -71,7 +71,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, return NULL; selem = bpf_map_kzalloc(&smap->map, smap->elem_size, - GFP_ATOMIC | __GFP_NOWARN); + gfp_flags | __GFP_NOWARN); if (selem) { if (value) memcpy(SDATA(selem)->data, value, smap->map.value_size); @@ -282,7 +282,8 @@ static int check_flags(const struct bpf_local_storage_data *old_sdata, int bpf_local_storage_alloc(void *owner, struct bpf_local_storage_map *smap, - struct bpf_local_storage_elem *first_selem) + struct bpf_local_storage_elem *first_selem, + gfp_t gfp_flags) { struct bpf_local_storage *prev_storage, *storage; struct bpf_local_storage **owner_storage_ptr; @@ -293,7 +294,7 @@ int bpf_local_storage_alloc(void *owner, return err; storage = bpf_map_kzalloc(&smap->map, sizeof(*storage), - GFP_ATOMIC | __GFP_NOWARN); + gfp_flags | __GFP_NOWARN); if (!storage) { err = -ENOMEM; goto uncharge; @@ -350,10 +351,10 @@ uncharge: */ struct bpf_local_storage_data * bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, - void *value, u64 map_flags) + void *value, u64 map_flags, gfp_t gfp_flags) { struct bpf_local_storage_data *old_sdata = NULL; - struct bpf_local_storage_elem *selem; + struct bpf_local_storage_elem *selem = NULL; struct bpf_local_storage *local_storage; unsigned long flags; int err; @@ -365,6 +366,9 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, !map_value_has_spin_lock(&smap->map))) return ERR_PTR(-EINVAL); + if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST) + return ERR_PTR(-EINVAL); + local_storage = rcu_dereference_check(*owner_storage(smap, owner), bpf_rcu_lock_held()); if (!local_storage || hlist_empty(&local_storage->list)) { @@ -373,11 +377,11 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, if (err) return ERR_PTR(err); - selem = bpf_selem_alloc(smap, owner, value, true); + selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags); if (!selem) return ERR_PTR(-ENOMEM); - err = bpf_local_storage_alloc(owner, smap, selem); + err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags); if (err) { kfree(selem); mem_uncharge(smap, owner, smap->elem_size); @@ -404,6 +408,12 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, } } + if (gfp_flags == GFP_KERNEL) { + selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags); + if (!selem) + return ERR_PTR(-ENOMEM); + } + raw_spin_lock_irqsave(&local_storage->lock, flags); /* Recheck local_storage->list under local_storage->lock */ @@ -429,19 +439,21 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, goto unlock; } - /* local_storage->lock is held. Hence, we are sure - * we can unlink and uncharge the old_sdata successfully - * later. 
Hence, instead of charging the new selem now - * and then uncharge the old selem later (which may cause - * a potential but unnecessary charge failure), avoid taking - * a charge at all here (the "!old_sdata" check) and the - * old_sdata will not be uncharged later during - * bpf_selem_unlink_storage_nolock(). - */ - selem = bpf_selem_alloc(smap, owner, value, !old_sdata); - if (!selem) { - err = -ENOMEM; - goto unlock_err; + if (gfp_flags != GFP_KERNEL) { + /* local_storage->lock is held. Hence, we are sure + * we can unlink and uncharge the old_sdata successfully + * later. Hence, instead of charging the new selem now + * and then uncharge the old selem later (which may cause + * a potential but unnecessary charge failure), avoid taking + * a charge at all here (the "!old_sdata" check) and the + * old_sdata will not be uncharged later during + * bpf_selem_unlink_storage_nolock(). + */ + selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags); + if (!selem) { + err = -ENOMEM; + goto unlock_err; + } } /* First, link the new selem to the map */ @@ -463,6 +475,10 @@ unlock: unlock_err: raw_spin_unlock_irqrestore(&local_storage->lock, flags); + if (selem) { + mem_uncharge(smap, owner, smap->elem_size); + kfree(selem); + } return ERR_PTR(err); } diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c index 5da7bed0f5f6..6638a0ecc3d2 100644 --- a/kernel/bpf/bpf_task_storage.c +++ b/kernel/bpf/bpf_task_storage.c @@ -174,7 +174,8 @@ static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key, bpf_task_storage_lock(); sdata = bpf_local_storage_update( - task, (struct bpf_local_storage_map *)map, value, map_flags); + task, (struct bpf_local_storage_map *)map, value, map_flags, + GFP_ATOMIC); bpf_task_storage_unlock(); err = PTR_ERR_OR_ZERO(sdata); @@ -226,8 +227,9 @@ out: return err; } -BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *, - task, void *, value, u64, flags) +/* *gfp_flags* is a hidden argument provided by the verifier */ +BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *, + task, void *, value, u64, flags, gfp_t, gfp_flags) { struct bpf_local_storage_data *sdata; @@ -250,7 +252,7 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *, (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) sdata = bpf_local_storage_update( task, (struct bpf_local_storage_map *)map, value, - BPF_NOEXIST); + BPF_NOEXIST, gfp_flags); unlock: bpf_task_storage_unlock(); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0287176bfe9a..6347dcdee1fd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13492,6 +13492,26 @@ static int do_misc_fixups(struct bpf_verifier_env *env) goto patch_call_imm; } + if (insn->imm == BPF_FUNC_task_storage_get || + insn->imm == BPF_FUNC_sk_storage_get || + insn->imm == BPF_FUNC_inode_storage_get) { + if (env->prog->aux->sleepable) + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__s32)GFP_KERNEL); + else + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__s32)GFP_ATOMIC); + insn_buf[1] = *insn; + cnt = 2; + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + goto patch_call_imm; + } + /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. 
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index d9c37fd10809..7aff1206a851 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -141,7 +141,7 @@ static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key, if (sock) { sdata = bpf_local_storage_update( sock->sk, (struct bpf_local_storage_map *)map, value, - map_flags); + map_flags, GFP_ATOMIC); sockfd_put(sock); return PTR_ERR_OR_ZERO(sdata); } @@ -172,7 +172,7 @@ bpf_sk_storage_clone_elem(struct sock *newsk, { struct bpf_local_storage_elem *copy_selem; - copy_selem = bpf_selem_alloc(smap, newsk, NULL, true); + copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, GFP_ATOMIC); if (!copy_selem) return NULL; @@ -230,7 +230,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) bpf_selem_link_map(smap, copy_selem); bpf_selem_link_storage_nolock(new_sk_storage, copy_selem); } else { - ret = bpf_local_storage_alloc(newsk, smap, copy_selem); + ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC); if (ret) { kfree(copy_selem); atomic_sub(smap->elem_size, @@ -255,8 +255,9 @@ out: return ret; } -BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, - void *, value, u64, flags) +/* *gfp_flags* is a hidden argument provided by the verifier */ +BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, + void *, value, u64, flags, gfp_t, gfp_flags) { struct bpf_local_storage_data *sdata; @@ -277,7 +278,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, refcount_inc_not_zero(&sk->sk_refcnt)) { sdata = bpf_local_storage_update( sk, (struct bpf_local_storage_map *)map, value, - BPF_NOEXIST); + BPF_NOEXIST, gfp_flags); /* sk must be a fullsock (guaranteed by verifier), * so sock_gen_put() is unnecessary. */ @@ -417,14 +418,16 @@ static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog) return false; } -BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk, - void *, value, u64, flags) +/* *gfp_flags* is a hidden argument provided by the verifier */ +BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk, + void *, value, u64, flags, gfp_t, gfp_flags) { WARN_ON_ONCE(!bpf_rcu_lock_held()); if (in_hardirq() || in_nmi()) return (unsigned long)NULL; - return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags); + return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags, + gfp_flags); } BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map, -- cgit v1.2.3-71-gd317 From 3387ce4d8a5f2956fab827edf499fe6780e83faa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 21 Mar 2022 11:05:50 +0100 Subject: headers/prep: Fix <linux/psi.h> header to build standalone Add the <linux/cgroup-defs.h> dependency to <linux/psi.h>, because cgroup_move_task() will dereference 'struct css_set'. ( Only older toolchains are affected, due to variations in the implementation of rcu_assign_pointer() et al.
) Cc: Peter Zijlstra Cc: Linus Torvalds Reported-by: Sachin Sant Reported-by: Andrew Morton Reported-by: Borislav Petkov Signed-off-by: Ingo Molnar --- include/linux/psi.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/psi.h b/include/linux/psi.h index 7f7d1d88c3bb..89784763d19e 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -6,6 +6,7 @@ #include #include #include +#include <linux/cgroup-defs.h> struct seq_file; struct css_set; -- cgit v1.2.3-71-gd317 From a43bf604446414103b7535f38e739b65601c4fb2 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Wed, 16 Mar 2022 18:24:26 -0400 Subject: NFSv4.1 provide mount option to toggle trunking discovery Introduce a new mount option -- trunkdiscovery,notrunkdiscovery -- to toggle whether or not the client will actively engage in discovery of trunking locations. v2: make notrunkdiscovery the default Signed-off-by: Olga Kornievskaia Fixes: 1976b2b31462 ("NFSv4.1 query for fs_location attr on a new file system") Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 3 ++- fs/nfs/fs_context.c | 8 ++++++++ include/linux/nfs_fs_sb.h | 1 + 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d1f34229e11a..e828504cc396 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -857,7 +857,8 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str } if (clp->rpc_ops->discover_trunking != NULL && - (server->caps & NFS_CAP_FS_LOCATIONS)) { + (server->caps & NFS_CAP_FS_LOCATIONS && + (server->flags & NFS_MOUNT_TRUNK_DISCOVERY))) { error = clp->rpc_ops->discover_trunking(server, mntfh); if (error < 0) return error; diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c index ea17fa1f31ec..e2d59bb5e6bb 100644 --- a/fs/nfs/fs_context.c +++ b/fs/nfs/fs_context.c @@ -80,6 +80,7 @@ enum nfs_param { Opt_source, Opt_tcp, Opt_timeo, + Opt_trunkdiscovery, Opt_udp, Opt_v, Opt_vers, @@ -180,6 +181,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = { fsparam_string("source", Opt_source), fsparam_flag ("tcp", Opt_tcp), fsparam_u32 ("timeo", Opt_timeo), + fsparam_flag_no("trunkdiscovery", Opt_trunkdiscovery), fsparam_flag ("udp", Opt_udp), fsparam_flag ("v2", Opt_v), fsparam_flag ("v3", Opt_v), @@ -529,6 +531,12 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, else ctx->flags &= ~NFS_MOUNT_NOCTO; break; + case Opt_trunkdiscovery: + if (result.negated) + ctx->flags &= ~NFS_MOUNT_TRUNK_DISCOVERY; + else + ctx->flags |= NFS_MOUNT_TRUNK_DISCOVERY; + break; case Opt_ac: if (result.negated) ctx->flags |= NFS_MOUNT_NOAC; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index ca0959e51e81..b0e3fd550122 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -151,6 +151,7 @@ struct nfs_server { #define NFS_MOUNT_SOFTREVAL 0x800000 #define NFS_MOUNT_WRITE_EAGER 0x01000000 #define NFS_MOUNT_WRITE_WAIT 0x02000000 +#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000 unsigned int fattr_valid; /* Valid attributes */ unsigned int caps; /* server capabilities */ -- cgit v1.2.3-71-gd317 From 028152260c5782504995e1fe3910ad39d4e4f34a Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 21 Mar 2022 11:35:29 -0500 Subject: Revert "of: base: Introduce of_alias_get_alias_list() to check alias IDs" This reverts commit b1078c355d76769b5ddefc67d143fbd9b6e52c05. The single user of of_alias_get_alias_list(), drivers/tty/serial/xilinx_uartps.c, has since been refactored and no longer needs this function.
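For reference, the surviving per-device pattern looks roughly like this (an illustrative sketch; the driver name is hypothetical and a "serial" alias stem is assumed):

#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)	/* hypothetical driver */
{
	/* Resolve this node's /aliases entry, e.g. serial0 -> id 0 */
	int id = of_alias_get_id(pdev->dev.of_node, "serial");

	if (id < 0)
		id = 0;		/* no alias configured: fall back to a default */

	dev_info(&pdev->dev, "probed as instance %d\n", id);
	return 0;
}

Resolving the alias at probe time makes the up-front bitmap of all alias IDs, and hence the reverted helper, unnecessary.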
It also contained a Smatch checker warning: drivers/of/base.c:2038 of_alias_get_alias_list() warn: passing negative bit value 's32min-(-2),0-s32max' to 'set_bit()' Reported-by: Dan Carpenter Signed-off-by: Rob Herring --- drivers/of/base.c | 54 ------------------------------------------------------ include/linux/of.h | 10 ---------- 2 files changed, 64 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/base.c b/drivers/of/base.c index e7d92b67cb8a..d4f98c8469ed 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -16,7 +16,6 @@ #define pr_fmt(fmt) "OF: " fmt -#include <linux/bitmap.h> #include #include #include @@ -1992,59 +1991,6 @@ int of_alias_get_id(struct device_node *np, const char *stem) } EXPORT_SYMBOL_GPL(of_alias_get_id); -/** - * of_alias_get_alias_list - Get alias list for the given device driver - * @matches: Array of OF device match structures to search in - * @stem: Alias stem of the given device_node - * @bitmap: Bitmap field pointer - * @nbits: Maximum number of alias IDs which can be recorded in bitmap - * - * The function travels the lookup table to record alias ids for the given - * device match structures and alias stem. - * - * Return: 0 or -ENOSYS when !CONFIG_OF or - * -EOVERFLOW if alias ID is greater then allocated nbits - */ -int of_alias_get_alias_list(const struct of_device_id *matches, - const char *stem, unsigned long *bitmap, - unsigned int nbits) -{ - struct alias_prop *app; - int ret = 0; - - /* Zero bitmap field to make sure that all the time it is clean */ - bitmap_zero(bitmap, nbits); - - mutex_lock(&of_mutex); - pr_debug("%s: Looking for stem: %s\n", __func__, stem); - list_for_each_entry(app, &aliases_lookup, link) { - pr_debug("%s: stem: %s, id: %d\n", - __func__, app->stem, app->id); - - if (strcmp(app->stem, stem) != 0) { - pr_debug("%s: stem comparison didn't pass %s\n", - __func__, app->stem); - continue; - } - - if (of_match_node(matches, app->np)) { - pr_debug("%s: Allocated ID %d\n", __func__, app->id); - - if (app->id >= nbits) { - pr_warn("%s: ID %d >= than bitmap field %d\n", - __func__, app->id, nbits); - ret = -EOVERFLOW; - } else { - set_bit(app->id, bitmap); - } - } - } - mutex_unlock(&of_mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(of_alias_get_alias_list); - /** * of_alias_get_highest_id - Get highest alias id for the given stem * @stem: Alias stem to be examined diff --git a/include/linux/of.h b/include/linux/of.h index 2dc77430a91a..04971e85fbc9 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -388,9 +388,6 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it, extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); extern int of_alias_get_id(struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); -extern int of_alias_get_alias_list(const struct of_device_id *matches, - const char *stem, unsigned long *bitmap, - unsigned int nbits); extern int of_machine_is_compatible(const char *compat); @@ -766,13 +763,6 @@ static inline int of_alias_get_highest_id(const char *stem) return -ENOSYS; } -static inline int of_alias_get_alias_list(const struct of_device_id *matches, - const char *stem, unsigned long *bitmap, - unsigned int nbits) -{ - return -ENOSYS; -} - static inline int of_machine_is_compatible(const char *compat) { return 0; -- cgit v1.2.3-71-gd317 From 4c65422901154766e5cee17875ed680366a4a141 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 7 Jan 2022 13:45:25 -0500 Subject: mm/gup: Remove an assumption of a contiguous memmap
This assumption needs the inverse of nth_page(), which is temporarily named page_nth() until it's renamed later in this series. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: John Hubbard Reviewed-by: Jason Gunthorpe Reviewed-by: William Kucharski --- include/linux/mm.h | 2 ++ mm/gup.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 0201d258c646..e3f8755f65ed 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -212,8 +212,10 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) +#define page_nth(head, tail) (page_to_pfn(tail) - page_to_pfn(head)) #else #define nth_page(page,n) ((page) + (n)) +#define page_nth(head, tail) ((tail) - (head)) #endif /* to align the pointer to the (next) page boundary */ diff --git a/mm/gup.c b/mm/gup.c index d585aa06afb2..ad120f470735 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -261,8 +261,8 @@ static inline struct page *compound_range_next(struct page *start, next = nth_page(start, i); page = compound_head(next); if (PageHead(page)) - nr = min_t(unsigned int, - page + compound_nr(page) - next, npages - i); + nr = min_t(unsigned int, npages - i, + compound_nr(page) - page_nth(page, next)); *ntails = nr; return page; -- cgit v1.2.3-71-gd317 From 5232c63f46fdd779303527ec36c518cc1e9c6b4e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 6 Jan 2022 16:46:43 -0500 Subject: mm: Make compound_pincount always available Move compound_pincount from the third page to the second page, which means it's available for all compound pages. That lets us delete hpage_pincount_available(). On 32-bit systems, there isn't enough space for both compound_pincount and compound_nr in the second page (it would collide with page->private, which is in use for pages in the swap cache), so revert the optimisation of storing both compound_order and compound_nr on 32-bit systems. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: John Hubbard Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Reviewed-by: William Kucharski --- Documentation/core-api/pin_user_pages.rst | 18 +++++++++--------- include/linux/mm.h | 21 ++++++++------------- include/linux/mm_types.h | 7 +++++-- mm/debug.c | 14 ++++---------- mm/gup.c | 20 +++++++++----------- mm/hugetlb.c | 4 ++++ mm/page_alloc.c | 3 +-- mm/rmap.c | 6 ++---- 8 files changed, 42 insertions(+), 51 deletions(-) (limited to 'include/linux') diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst index fcf605be43d0..b18416f4500f 100644 --- a/Documentation/core-api/pin_user_pages.rst +++ b/Documentation/core-api/pin_user_pages.rst @@ -55,18 +55,18 @@ flags the caller provides. The caller is required to pass in a non-null struct pages* array, and the function then pins pages by incrementing each by a special value: GUP_PIN_COUNTING_BIAS. -For huge pages (and in fact, any compound page of more than 2 pages), the -GUP_PIN_COUNTING_BIAS scheme is not used. Instead, an exact form of pin counting -is achieved, by using the 3rd struct page in the compound page. A new struct -page field, hpage_pinned_refcount, has been added in order to support this. +For compound pages, the GUP_PIN_COUNTING_BIAS scheme is not used. 
Instead, +an exact form of pin counting is achieved, by using the 2nd struct page +in the compound page. A new struct page field, compound_pincount, has +been added in order to support this. This approach for compound pages avoids the counting upper limit problems that are discussed below. Those limitations would have been aggravated severely by huge pages, because each tail page adds a refcount to the head page. And in -fact, testing revealed that, without a separate hpage_pinned_refcount field, +fact, testing revealed that, without a separate compound_pincount field, page overflows were seen in some huge page stress tests. -This also means that huge pages and compound pages (of order > 1) do not suffer +This also means that huge pages and compound pages do not suffer from the false positives problem that is mentioned below.:: Function @@ -264,9 +264,9 @@ place.) Other diagnostics ================= -dump_page() has been enhanced slightly, to handle these new counting fields, and -to better report on compound pages in general. Specifically, for compound pages -with order > 1, the exact (hpage_pinned_refcount) pincount is reported. +dump_page() has been enhanced slightly, to handle these new counting +fields, and to better report on compound pages in general. Specifically, +for compound pages, the exact (compound_pincount) pincount is reported. References ========== diff --git a/include/linux/mm.h b/include/linux/mm.h index e3f8755f65ed..c64bd0b67d75 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -887,17 +887,6 @@ static inline void destroy_compound_page(struct page *page) compound_page_dtors[page[1].compound_dtor](page); } -static inline bool hpage_pincount_available(struct page *page) -{ - /* - * Can the page->hpage_pinned_refcount field be used? That field is in - * the 3rd page of the compound page, so the smallest (2-page) compound - * pages cannot support it. - */ - page = compound_head(page); - return PageCompound(page) && compound_order(page) > 1; -} - static inline int head_compound_pincount(struct page *head) { return atomic_read(compound_pincount_ptr(head)); @@ -905,7 +894,7 @@ static inline int head_compound_pincount(struct page *head) static inline int compound_pincount(struct page *page) { - VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); + VM_BUG_ON_PAGE(!PageCompound(page), page); page = compound_head(page); return head_compound_pincount(page); } @@ -913,7 +902,9 @@ static inline int compound_pincount(struct page *page) static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; +#ifdef CONFIG_64BIT page[1].compound_nr = 1U << order; +#endif } /* Returns the number of pages in this potentially compound page. */ @@ -921,7 +912,11 @@ static inline unsigned long compound_nr(struct page *page) { if (!PageHead(page)) return 1; +#ifdef CONFIG_64BIT return page[1].compound_nr; +#else + return 1UL << compound_order(page); +#endif } /* Returns the number of bytes in this potentially compound page. 
*/ @@ -1269,7 +1264,7 @@ void unpin_user_pages(struct page **pages, unsigned long npages); */ static inline bool page_maybe_dma_pinned(struct page *page) { - if (hpage_pincount_available(page)) + if (PageCompound(page)) return compound_pincount(page) > 0; /* diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 475bdb282769..0e274c9b934e 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -135,11 +135,14 @@ struct page { unsigned char compound_dtor; unsigned char compound_order; atomic_t compound_mapcount; + atomic_t compound_pincount; +#ifdef CONFIG_64BIT unsigned int compound_nr; /* 1 << compound_order */ +#endif }; struct { /* Second tail page of compound page */ unsigned long _compound_pad_1; /* compound_head */ - atomic_t hpage_pinned_refcount; + unsigned long _compound_pad_2; /* For both global and memcg */ struct list_head deferred_list; }; @@ -300,7 +303,7 @@ static inline atomic_t *compound_mapcount_ptr(struct page *page) static inline atomic_t *compound_pincount_ptr(struct page *page) { - return &page[2].hpage_pinned_refcount; + return &page[1].compound_pincount; } /* diff --git a/mm/debug.c b/mm/debug.c index bc9ac87f0e08..c4cf44266430 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -92,16 +92,10 @@ static void __dump_page(struct page *page) page, page_ref_count(head), mapcount, mapping, page_to_pgoff(page), page_to_pfn(page)); if (compound) { - if (hpage_pincount_available(page)) { - pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n", - head, compound_order(head), - head_compound_mapcount(head), - head_compound_pincount(head)); - } else { - pr_warn("head:%p order:%u compound_mapcount:%d\n", - head, compound_order(head), - head_compound_mapcount(head)); - } + pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n", + head, compound_order(head), + head_compound_mapcount(head), + head_compound_pincount(head)); } #ifdef CONFIG_MEMCG diff --git a/mm/gup.c b/mm/gup.c index 1809dc037a8e..56b6b01a430b 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -99,12 +99,11 @@ retry: * * FOLL_GET: page's refcount will be incremented by @refs. * - * FOLL_PIN on compound pages that are > two pages long: page's refcount will - * be incremented by @refs, and page[2].hpage_pinned_refcount will be - * incremented by @refs * GUP_PIN_COUNTING_BIAS. + * FOLL_PIN on compound pages: page's refcount will be incremented by + * @refs, and page[1].compound_pincount will be incremented by @refs. * - * FOLL_PIN on normal pages, or compound pages that are two pages long: - * page's refcount will be incremented by @refs * GUP_PIN_COUNTING_BIAS. + * FOLL_PIN on normal pages: page's refcount will be incremented by + * @refs * GUP_PIN_COUNTING_BIAS. * * Return: head page (with refcount appropriately incremented) for success, or * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's @@ -135,16 +134,15 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page, return NULL; /* - * When pinning a compound page of order > 1 (which is - * what hpage_pincount_available() checks for), use an - * exact count to track it. + * When pinning a compound page, use an exact count to + * track it. * * However, be sure to *also* increment the normal page * refcount field at least once, so that the page really * is pinned. That's why the refcount from the earlier * try_get_compound_head() is left intact. 
*/ - if (hpage_pincount_available(page)) + if (PageHead(page)) atomic_add(refs, compound_pincount_ptr(page)); else page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1)); @@ -166,7 +164,7 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags) if (flags & FOLL_PIN) { mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, refs); - if (hpage_pincount_available(page)) + if (PageHead(page)) atomic_sub(refs, compound_pincount_ptr(page)); else refs *= GUP_PIN_COUNTING_BIAS; @@ -211,7 +209,7 @@ bool __must_check try_grab_page(struct page *page, unsigned int flags) * increment the normal page refcount field at least once, * so that the page really is pinned. */ - if (hpage_pincount_available(page)) { + if (PageHead(page)) { page_ref_add(page, 1); atomic_add(1, compound_pincount_ptr(page)); } else { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 43fb3155298e..785d6e340292 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1320,7 +1320,9 @@ static void __destroy_compound_gigantic_page(struct page *page, } set_compound_order(page, 0); +#ifdef CONFIG_64BIT page[1].compound_nr = 0; +#endif __ClearPageHead(page); } @@ -1812,7 +1814,9 @@ out_error: for (; j < nr_pages; j++, p = mem_map_next(p, page, j)) __ClearPageReserved(p); set_compound_order(page, 0); +#ifdef CONFIG_64BIT page[1].compound_nr = 0; +#endif __ClearPageHead(page); return false; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3589febc6d31..02283598fd14 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -734,8 +734,7 @@ static void prep_compound_head(struct page *page, unsigned int order) set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); set_compound_order(page, order); atomic_set(compound_mapcount_ptr(page), -1); - if (hpage_pincount_available(page)) - atomic_set(compound_pincount_ptr(page), 0); + atomic_set(compound_pincount_ptr(page), 0); } static void prep_compound_tail(struct page *head, int tail_idx) diff --git a/mm/rmap.c b/mm/rmap.c index c7921c102bc0..1a13d5d6cfc7 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1219,8 +1219,7 @@ void page_add_new_anon_rmap(struct page *page, VM_BUG_ON_PAGE(!PageTransHuge(page), page); /* increment count (starts at -1) */ atomic_set(compound_mapcount_ptr(page), 0); - if (hpage_pincount_available(page)) - atomic_set(compound_pincount_ptr(page), 0); + atomic_set(compound_pincount_ptr(page), 0); __mod_lruvec_page_state(page, NR_ANON_THPS, nr); } else { @@ -2353,8 +2352,7 @@ void hugepage_add_new_anon_rmap(struct page *page, { BUG_ON(address < vma->vm_start || address >= vma->vm_end); atomic_set(compound_mapcount_ptr(page), 0); - if (hpage_pincount_available(page)) - atomic_set(compound_pincount_ptr(page), 0); + atomic_set(compound_pincount_ptr(page), 0); __page_set_anon_rmap(page, vma, address, 1); } -- cgit v1.2.3-71-gd317 From 3d11b225aeb184bc3dc9b4b27b302815a7c531aa Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 27 Dec 2021 18:28:58 -0500 Subject: mm: Add folio_pincount_ptr() This is the folio equivalent of compound_pincount_ptr(). 
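A usage sketch (the helper name below is hypothetical; the pattern mirrors how later patches in this series read the pin count):

/* Sketch only: read the exact pincount that large folios keep in
 * their second page.
 */
static inline int my_folio_pincount(struct folio *folio)	/* hypothetical */
{
	if (!folio_test_large(folio))
		return -1;	/* small folios have no separate pincount */
	return atomic_read(folio_pincount_ptr(folio));
}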
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: John Hubbard Reviewed-by: Jason Gunthorpe Reviewed-by: William Kucharski --- include/linux/mm.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index c64bd0b67d75..c45739dfdd04 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1544,6 +1544,11 @@ static inline unsigned long folio_pfn(struct folio *folio) return page_to_pfn(&folio->page); } +static inline atomic_t *folio_pincount_ptr(struct folio *folio) +{ + return &folio_page(folio, 1)->compound_pincount; +} + /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ #ifdef CONFIG_MIGRATION static inline bool is_pinnable_page(struct page *page) -- cgit v1.2.3-71-gd317 From 0b90ddae13441c43a30d2e2689b8193a81891c92 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 27 Dec 2021 18:40:41 -0500 Subject: mm: Turn page_maybe_dma_pinned() into folio_maybe_dma_pinned() Replace three calls to compound_head() with one. This removes the last user of compound_pincount(), so remove that helper too. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: John Hubbard Reviewed-by: Jason Gunthorpe Reviewed-by: William Kucharski --- include/linux/mm.h | 129 ++++++++++++++++++++++++++--------------------------- 1 file changed, 63 insertions(+), 66 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index c45739dfdd04..35e453ac5c0f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -892,13 +892,6 @@ static inline int head_compound_pincount(struct page *head) return atomic_read(compound_pincount_ptr(head)); } -static inline int compound_pincount(struct page *page) -{ - VM_BUG_ON_PAGE(!PageCompound(page), page); - page = compound_head(page); - return head_compound_pincount(page); -} - static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; @@ -1236,70 +1229,11 @@ void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty); void unpin_user_pages(struct page **pages, unsigned long npages); -/** - * page_maybe_dma_pinned - Report if a page is pinned for DMA. - * @page: The page. - * - * This function checks if a page has been pinned via a call to - * a function in the pin_user_pages() family. - * - * For non-huge pages, the return value is partially fuzzy: false is not fuzzy, - * because it means "definitely not pinned for DMA", but true means "probably - * pinned for DMA, but possibly a false positive due to having at least - * GUP_PIN_COUNTING_BIAS worth of normal page references". - * - * False positives are OK, because: a) it's unlikely for a page to get that many - * refcounts, and b) all the callers of this routine are expected to be able to - * deal gracefully with a false positive. - * - * For huge pages, the result will be exactly correct. That's because we have - * more tracking data available: the 3rd struct page in the compound page is - * used to track the pincount (instead using of the GUP_PIN_COUNTING_BIAS - * scheme). - * - * For more information, please see Documentation/core-api/pin_user_pages.rst. - * - * Return: True, if it is likely that the page has been "dma-pinned". - * False, if the page is definitely not dma-pinned. - */ -static inline bool page_maybe_dma_pinned(struct page *page) -{ - if (PageCompound(page)) - return compound_pincount(page) > 0; - - /* - * page_ref_count() is signed. 
If that refcount overflows, then - * page_ref_count() returns a negative value, and callers will avoid - * further incrementing the refcount. - * - * Here, for that overflow case, use the signed bit to count a little - * bit higher via unsigned math, and thus still get an accurate result. - */ - return ((unsigned int)page_ref_count(compound_head(page))) >= - GUP_PIN_COUNTING_BIAS; -} - static inline bool is_cow_mapping(vm_flags_t flags) { return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; } -/* - * This should most likely only be called during fork() to see whether we - * should break the cow immediately for a page on the src mm. - */ -static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, - struct page *page) -{ - if (!is_cow_mapping(vma->vm_flags)) - return false; - - if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) - return false; - - return page_maybe_dma_pinned(page); -} - #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define SECTION_IN_PAGE_FLAGS #endif @@ -1549,6 +1483,69 @@ static inline atomic_t *folio_pincount_ptr(struct folio *folio) return &folio_page(folio, 1)->compound_pincount; } +/** + * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA. + * @folio: The folio. + * + * This function checks if a folio has been pinned via a call to + * a function in the pin_user_pages() family. + * + * For small folios, the return value is partially fuzzy: false is not fuzzy, + * because it means "definitely not pinned for DMA", but true means "probably + * pinned for DMA, but possibly a false positive due to having at least + * GUP_PIN_COUNTING_BIAS worth of normal folio references". + * + * False positives are OK, because: a) it's unlikely for a folio to + * get that many refcounts, and b) all the callers of this routine are + * expected to be able to deal gracefully with a false positive. + * + * For large folios, the result will be exactly correct. That's because + * we have more tracking data available: the compound_pincount is used + * instead of the GUP_PIN_COUNTING_BIAS scheme. + * + * For more information, please see Documentation/core-api/pin_user_pages.rst. + * + * Return: True, if it is likely that the page has been "dma-pinned". + * False, if the page is definitely not dma-pinned. + */ +static inline bool folio_maybe_dma_pinned(struct folio *folio) +{ + if (folio_test_large(folio)) + return atomic_read(folio_pincount_ptr(folio)) > 0; + + /* + * folio_ref_count() is signed. If that refcount overflows, then + * folio_ref_count() returns a negative value, and callers will avoid + * further incrementing the refcount. + * + * Here, for that overflow case, use the sign bit to count a little + * bit higher via unsigned math, and thus still get an accurate result. + */ + return ((unsigned int)folio_ref_count(folio)) >= + GUP_PIN_COUNTING_BIAS; +} + +static inline bool page_maybe_dma_pinned(struct page *page) +{ + return folio_maybe_dma_pinned(page_folio(page)); +} + +/* + * This should most likely only be called during fork() to see whether we + * should break the cow immediately for a page on the src mm. 
+ */ +static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, + struct page *page) +{ + if (!is_cow_mapping(vma->vm_flags)) + return false; + + if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) + return false; + + return page_maybe_dma_pinned(page); +} + /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ #ifdef CONFIG_MIGRATION static inline bool is_pinnable_page(struct page *page) -- cgit v1.2.3-71-gd317 From 40fcc7fc2c3838f3afe07a3a72709b45566e6cdb Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 29 Dec 2021 12:23:55 -0500 Subject: mm: Remove page_cache_add_speculative() and page_cache_get_speculative() These wrappers have no more callers, so delete them. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: John Hubbard Reviewed-by: Jason Gunthorpe Reviewed-by: William Kucharski --- include/linux/mm.h | 7 +++---- include/linux/pagemap.h | 10 ---------- 2 files changed, 3 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 35e453ac5c0f..b764057022c8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1215,10 +1215,9 @@ static inline void put_page(struct page *page) * applications that don't have huge page reference counts, this won't be an * issue. * - * Locking: the lockless algorithm described in page_cache_get_speculative() - * and page_cache_gup_pin_speculative() provides safe operation for - * get_user_pages and page_mkclean and other calls that race to set up page - * table entries. + * Locking: the lockless algorithm described in folio_try_get_rcu() + * provides safe operation for get_user_pages(), page_mkclean() and + * other calls that race to set up page table entries. */ #define GUP_PIN_COUNTING_BIAS (1U << 10) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 270bf5136c34..cdb3f118603a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -283,16 +283,6 @@ static inline struct inode *folio_inode(struct folio *folio) return folio->mapping->host; } -static inline bool page_cache_add_speculative(struct page *page, int count) -{ - return folio_ref_try_add_rcu((struct folio *)page, count); -} - -static inline bool page_cache_get_speculative(struct page *page) -{ - return page_cache_add_speculative(page, 1); -} - /** * folio_attach_private - Attach private data to a folio. * @folio: Folio to attach data to. -- cgit v1.2.3-71-gd317 From 822951d84684d7a0c4f45e7231c960e7fe786d8f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 8 Jan 2022 00:15:04 -0500 Subject: mm/hugetlb: Use try_grab_folio() instead of try_grab_compound_head() follow_hugetlb_page() only cares about success or failure, so it doesn't need to know the type of the returned pointer, only whether it's NULL or not. 
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: John Hubbard Reviewed-by: Jason Gunthorpe Reviewed-by: William Kucharski --- include/linux/mm.h | 3 --- mm/gup.c | 2 +- mm/hugetlb.c | 7 +++---- 3 files changed, 4 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index b764057022c8..dca5c99395c9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1124,9 +1124,6 @@ static inline void get_page(struct page *page) } bool __must_check try_grab_page(struct page *page, unsigned int flags); -struct page *try_grab_compound_head(struct page *page, int refs, - unsigned int flags); - static inline __must_check bool try_get_page(struct page *page) { diff --git a/mm/gup.c b/mm/gup.c index cbbddcf8ff3f..014004102e26 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -133,7 +133,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) return NULL; } -struct page *try_grab_compound_head(struct page *page, +static inline struct page *try_grab_compound_head(struct page *page, int refs, unsigned int flags) { return &try_grab_folio(page, refs, flags)->page; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 785d6e340292..10203f3b1ccf 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6076,7 +6076,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, if (pages) { /* - * try_grab_compound_head() should always succeed here, + * try_grab_folio() should always succeed here, * because: a) we hold the ptl lock, and b) we've just * checked that the huge page is present in the page * tables. If the huge page is present, then the tail @@ -6085,9 +6085,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, * any way. So this page must be available at this * point, unless the page refcount overflowed: */ - if (WARN_ON_ONCE(!try_grab_compound_head(pages[i], - refs, - flags))) { + if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs, + flags))) { spin_unlock(ptl); remainder = 0; err = -ENOMEM; -- cgit v1.2.3-71-gd317 From 659508f9c936aa6e3aaf6e9cf6a4a8836b8f8355 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 23 Dec 2021 10:20:12 -0500 Subject: mm/gup: Turn compound_range_next() into gup_folio_range_next() Convert the only caller to work on folios instead of pages. This removes the last caller of put_compound_head(), so delete it. 
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: John Hubbard Reviewed-by: Jason Gunthorpe Reviewed-by: William Kucharski --- include/linux/mm.h | 4 ++-- mm/gup.c | 38 +++++++++++++++++--------------------- 2 files changed, 19 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index dca5c99395c9..0d3f9057a807 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -212,10 +212,10 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) -#define page_nth(head, tail) (page_to_pfn(tail) - page_to_pfn(head)) +#define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio)) #else #define nth_page(page,n) ((page) + (n)) -#define page_nth(head, tail) ((tail) - (head)) +#define folio_page_idx(folio, p) ((p) - &(folio)->page) #endif /* to align the pointer to the (next) page boundary */ diff --git a/mm/gup.c b/mm/gup.c index 0bde28f0543f..5edd05df9c37 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -146,12 +146,6 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) folio_put_refs(folio, refs); } -static void put_compound_head(struct page *page, int refs, unsigned int flags) -{ - VM_BUG_ON_PAGE(PageTail(page), page); - gup_put_folio((struct folio *)page, refs, flags); -} - /** * try_grab_page() - elevate a page's refcount by a flag-dependent amount * @page: pointer to page to be grabbed @@ -214,20 +208,19 @@ void unpin_user_page(struct page *page) } EXPORT_SYMBOL(unpin_user_page); -static inline struct page *compound_range_next(struct page *start, +static inline struct folio *gup_folio_range_next(struct page *start, unsigned long npages, unsigned long i, unsigned int *ntails) { - struct page *next, *page; + struct page *next = nth_page(start, i); + struct folio *folio = page_folio(next); unsigned int nr = 1; - next = nth_page(start, i); - page = compound_head(next); - if (PageHead(page)) + if (folio_test_large(folio)) nr = min_t(unsigned int, npages - i, - compound_nr(page) - page_nth(page, next)); + folio_nr_pages(folio) - folio_page_idx(folio, next)); *ntails = nr; - return page; + return folio; } static inline struct folio *gup_folio_next(struct page **list, @@ -335,15 +328,18 @@ EXPORT_SYMBOL(unpin_user_pages_dirty_lock); void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty) { - unsigned long index; - struct page *head; - unsigned int ntails; + unsigned long i; + struct folio *folio; + unsigned int nr; - for (index = 0; index < npages; index += ntails) { - head = compound_range_next(page, npages, index, &ntails); - if (make_dirty && !PageDirty(head)) - set_page_dirty_lock(head); - put_compound_head(head, ntails, FOLL_PIN); + for (i = 0; i < npages; i += nr) { + folio = gup_folio_range_next(page, npages, i, &nr); + if (make_dirty && !folio_test_dirty(folio)) { + folio_lock(folio); + folio_mark_dirty(folio); + folio_unlock(folio); + } + gup_put_folio(folio, nr, FOLL_PIN); } } EXPORT_SYMBOL(unpin_user_page_range_dirty_lock); -- cgit v1.2.3-71-gd317 From 536939ff516382b391a0039262e27fc80c7b3924 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 21 Mar 2022 12:57:38 -0400 Subject: mm: Add three folio wrappers folio_is_zone_device() is equivalent to is_zone_device_page(), folio_is_device_private() is equivalent to is_device_private_page(), and folio_is_pinnable() is 
equivalent to is_pinnable_page(). All of these tests return the same result for every page in the folio, so we can just pass the head page of the folio to the page variant of the function. Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/memremap.h | 5 +++++ include/linux/mm.h | 10 ++++++++++ 2 files changed, 15 insertions(+) (limited to 'include/linux') diff --git a/include/linux/memremap.h b/include/linux/memremap.h index d6a114dd5ea8..8af304f6b504 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -138,6 +138,11 @@ static inline bool is_device_private_page(const struct page *page) page->pgmap->type == MEMORY_DEVICE_PRIVATE; } +static inline bool folio_is_device_private(const struct folio *folio) +{ + return is_device_private_page(&folio->page); +} + static inline bool is_pci_p2pdma_page(const struct page *page) { return IS_ENABLED(CONFIG_PCI_P2PDMA) && diff --git a/include/linux/mm.h b/include/linux/mm.h index 0d3f9057a807..2ca10c167f35 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1075,6 +1075,11 @@ static inline bool is_zone_device_page(const struct page *page) } #endif +static inline bool folio_is_zone_device(const struct folio *folio) +{ + return is_zone_device_page(&folio->page); +} + static inline bool is_zone_movable_page(const struct page *page) { return page_zonenum(page) == ZONE_MOVABLE; @@ -1556,6 +1561,11 @@ static inline bool is_pinnable_page(struct page *page) } #endif +static inline bool folio_is_pinnable(struct folio *folio) +{ + return is_pinnable_page(&folio->page); +} + static inline void set_page_zone(struct page *page, enum zone_type zone) { page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); -- cgit v1.2.3-71-gd317 From 8927f6473e56e32e328ae8ed43736412f7f76a4e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 23 Dec 2021 16:39:05 -0500 Subject: mm/workingset: Convert workingset_eviction() to take a folio This removes an assumption that THPs are the only kind of compound pages and removes a few hidden calls to compound_head(). 
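To see what the "hidden calls" are, compare simplified, hypothetical versions of a page flag test and its folio counterpart (these are sketches, not the exact upstream macros):

static inline bool my_page_test_workingset(struct page *page)
{
	/* Page variant: every call re-derives the head page */
	return test_bit(PG_workingset, &compound_head(page)->flags);
}

static inline bool my_folio_test_workingset(struct folio *folio)
{
	/* Folio variant: the caller already holds the head page */
	return test_bit(PG_workingset, &folio->page.flags);
}

Converting a path like workingset_eviction() to take a folio lets the compound_head() lookups drop out, and folio_nr_pages() handles any compound page, not just THPs.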
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- include/linux/swap.h | 2 +- mm/vmscan.c | 7 ++++--- mm/workingset.c | 25 +++++++++++++------------ 3 files changed, 18 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 1d38d9475c4d..de36f140227e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -328,7 +328,7 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio) /* linux/mm/workingset.c */ void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); -void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg); +void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg); void workingset_refault(struct folio *folio, void *shadow); void workingset_activation(struct folio *folio); diff --git a/mm/vmscan.c b/mm/vmscan.c index 55fb6d8e30fd..3b96f6e7d895 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1242,6 +1242,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping) static int __remove_mapping(struct address_space *mapping, struct page *page, bool reclaimed, struct mem_cgroup *target_memcg) { + struct folio *folio = page_folio(page); int refcount; void *shadow = NULL; @@ -1289,7 +1290,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, swp_entry_t swap = { .val = page_private(page) }; mem_cgroup_swapout(page, swap); if (reclaimed && !mapping_exiting(mapping)) - shadow = workingset_eviction(page, target_memcg); + shadow = workingset_eviction(folio, target_memcg); __delete_from_swap_cache(page, swap, shadow); xa_unlock_irq(&mapping->i_pages); put_swap_page(page, swap); @@ -1315,8 +1316,8 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, */ if (reclaimed && page_is_file_lru(page) && !mapping_exiting(mapping) && !dax_mapping(mapping)) - shadow = workingset_eviction(page, target_memcg); - __delete_from_page_cache(page, shadow); + shadow = workingset_eviction(folio, target_memcg); + __filemap_remove_folio(folio, shadow); xa_unlock_irq(&mapping->i_pages); if (mapping_shrinkable(mapping)) inode_add_lru(mapping->host); diff --git a/mm/workingset.c b/mm/workingset.c index 8c03afe1d67c..b717eae4e0dd 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -245,31 +245,32 @@ void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages) } /** - * workingset_eviction - note the eviction of a page from memory + * workingset_eviction - note the eviction of a folio from memory * @target_memcg: the cgroup that is causing the reclaim - * @page: the page being evicted + * @folio: the folio being evicted * - * Return: a shadow entry to be stored in @page->mapping->i_pages in place - * of the evicted @page so that a later refault can be detected. + * Return: a shadow entry to be stored in @folio->mapping->i_pages in place + * of the evicted @folio so that a later refault can be detected. 
*/ -void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg) +void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg) { - struct pglist_data *pgdat = page_pgdat(page); + struct pglist_data *pgdat = folio_pgdat(folio); unsigned long eviction; struct lruvec *lruvec; int memcgid; - /* Page is fully exclusive and pins page's memory cgroup pointer */ - VM_BUG_ON_PAGE(PageLRU(page), page); - VM_BUG_ON_PAGE(page_count(page), page); - VM_BUG_ON_PAGE(!PageLocked(page), page); + /* Folio is fully exclusive and pins folio's memory cgroup pointer */ + VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); + VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); lruvec = mem_cgroup_lruvec(target_memcg, pgdat); /* XXX: target_memcg can be NULL, go through lruvec */ memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); eviction = atomic_long_read(&lruvec->nonresident_age); - workingset_age_nonresident(lruvec, thp_nr_pages(page)); - return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page)); + workingset_age_nonresident(lruvec, folio_nr_pages(folio)); + return pack_shadow(memcgid, pgdat, eviction, + folio_test_workingset(folio)); } /** -- cgit v1.2.3-71-gd317 From 3ecb0087ecee6213544a1e0b838826a0f4831ce5 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 27 Dec 2021 21:11:34 -0500 Subject: mm/memcg: Convert mem_cgroup_swapout() to take a folio This removes an assumption that THPs are the only kind of compound pages and removes a couple of hidden calls to compound_head. It also documents that you can't pass a tail page to mem_cgroup_swapout(). Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- include/linux/swap.h | 4 ++-- mm/memcontrol.c | 22 +++++++++++----------- mm/vmscan.c | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index de36f140227e..e7cb7a1e6ceb 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -741,7 +741,7 @@ static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) #endif #ifdef CONFIG_MEMCG_SWAP -extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry); +void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry); extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) { @@ -761,7 +761,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); extern bool mem_cgroup_swap_full(struct page *page); #else -static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8fef072dc1ce..f7fbd5f91e3d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7116,19 +7116,19 @@ static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) /** * mem_cgroup_swapout - transfer a memsw charge to swap - * @page: page whose memsw charge to transfer + * @folio: folio whose memsw charge to transfer * @entry: swap entry to move the charge to * - * Transfer the memsw charge of @page to @entry. + * Transfer the memsw charge of @folio to @entry. 
*/ -void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) { struct mem_cgroup *memcg, *swap_memcg; unsigned int nr_entries; unsigned short oldid; - VM_BUG_ON_PAGE(PageLRU(page), page); - VM_BUG_ON_PAGE(page_count(page), page); + VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); + VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); if (mem_cgroup_disabled()) return; @@ -7136,9 +7136,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) return; - memcg = page_memcg(page); + memcg = folio_memcg(folio); - VM_WARN_ON_ONCE_PAGE(!memcg, page); + VM_WARN_ON_ONCE_FOLIO(!memcg, folio); if (!memcg) return; @@ -7148,16 +7148,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) * ancestor for the swap instead and transfer the memory+swap charge. */ swap_memcg = mem_cgroup_id_get_online(memcg); - nr_entries = thp_nr_pages(page); + nr_entries = folio_nr_pages(folio); /* Get references for the tail pages, too */ if (nr_entries > 1) mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), nr_entries); - VM_BUG_ON_PAGE(oldid, page); + VM_BUG_ON_FOLIO(oldid, folio); mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); - page->memcg_data = 0; + folio->memcg_data = 0; if (!mem_cgroup_is_root(memcg)) page_counter_uncharge(&memcg->memory, nr_entries); @@ -7176,7 +7176,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) */ VM_BUG_ON(!irqs_disabled()); mem_cgroup_charge_statistics(memcg, -nr_entries); - memcg_check_events(memcg, page_to_nid(page)); + memcg_check_events(memcg, folio_nid(folio)); css_put(&memcg->css); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 3b96f6e7d895..4f54c6d22083 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1288,7 +1288,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; - mem_cgroup_swapout(page, swap); + mem_cgroup_swapout(folio, swap); if (reclaimed && !mapping_exiting(mapping)) shadow = workingset_eviction(folio, target_memcg); __delete_from_swap_cache(page, swap, shadow); -- cgit v1.2.3-71-gd317 From 06d20bdb986815a75fb1addf34655756ba922e3a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 17 Jan 2022 14:40:12 -0500 Subject: mm: Add lru_to_folio() Since page->lru occupies the same bytes as compound_head, any page on the LRU list must be a folio. 
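The underlying layout argument, as a simplified sketch of the aliasing members (the real definition lives in include/linux/mm_types.h and has many more fields; the type name here is illustrative):

struct page_sketch {	/* simplified view of struct page */
	unsigned long flags;
	union {
		struct list_head lru;		/* lru.next overlays ... */
		unsigned long compound_head;	/* ... bit 0 set on tail pages */
	};
	/* ... */
};

Because list_head pointers are at least word-aligned, a page threaded on an LRU list can never have bit 0 set in that word, so it cannot be a tail page, and treating it as a folio is safe.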
Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/mm.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 2ca10c167f35..a583b7375445 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -225,6 +225,10 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) +static inline struct folio *lru_to_folio(struct list_head *head) +{ + return list_entry((head)->prev, struct folio, lru); +} void setup_initial_init_mm(void *start_code, void *end_code, void *end_data, void *brk); -- cgit v1.2.3-71-gd317 From 5100da38ef3c33d9ad8b60b29c2b671249bf7e1d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 12 Feb 2022 22:48:55 -0500 Subject: mm: Convert remove_mapping() to take a folio Add kernel-doc and return the number of pages removed in order to get the statistics right in __invalidate_mapping_pages(). Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Miaohe Lin --- fs/splice.c | 5 ++--- include/linux/swap.h | 2 +- mm/truncate.c | 2 +- mm/vmscan.c | 23 ++++++++++++++--------- 4 files changed, 18 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/fs/splice.c b/fs/splice.c index 23ff9c303abc..047b79db8eb5 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -46,8 +46,7 @@ static bool page_cache_pipe_buf_try_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - struct page *page = buf->page; - struct folio *folio = page_folio(page); + struct folio *folio = page_folio(buf->page); struct address_space *mapping; folio_lock(folio); @@ -74,7 +73,7 @@ static bool page_cache_pipe_buf_try_steal(struct pipe_inode_info *pipe, * If we succeeded in removing the mapping, set LRU flag * and return good. */ - if (remove_mapping(mapping, page)) { + if (remove_mapping(mapping, folio)) { buf->flags |= PIPE_BUF_FLAG_LRU; return true; } diff --git a/include/linux/swap.h b/include/linux/swap.h index e7cb7a1e6ceb..304f174b4d31 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -395,7 +395,7 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, unsigned long *nr_scanned); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; -extern int remove_mapping(struct address_space *mapping, struct page *page); +long remove_mapping(struct address_space *mapping, struct folio *folio); extern unsigned long reclaim_pages(struct list_head *page_list); #ifdef CONFIG_NUMA diff --git a/mm/truncate.c b/mm/truncate.c index 06b7a4ca2370..1d97c4cae6a0 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -294,7 +294,7 @@ int invalidate_inode_page(struct page *page) if (folio_has_private(folio) && !filemap_release_folio(folio, 0)) return 0; - return remove_mapping(mapping, page); + return remove_mapping(mapping, folio); } /** diff --git a/mm/vmscan.c b/mm/vmscan.c index e49f5fb40a83..5a018aa5ab7c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1335,23 +1335,28 @@ cannot_free: return 0; } -/* - * Attempt to detach a locked page from its ->mapping. If it is dirty or if - * someone else has a ref on the page, abort and return 0. If it was - * successfully detached, return 1. Assumes the caller has a single ref on - * this page. +/** + * remove_mapping() - Attempt to remove a folio from its mapping. + * @mapping: The address space. + * @folio: The folio to remove. 
+ * + * If the folio is dirty, under writeback or if someone else has a ref + * on it, removal will fail. + * Return: The number of pages removed from the mapping. 0 if the folio + * could not be removed. + * Context: The caller should have a single refcount on the folio and + * hold its lock. */ -int remove_mapping(struct address_space *mapping, struct page *page) +long remove_mapping(struct address_space *mapping, struct folio *folio) { - struct folio *folio = page_folio(page); if (__remove_mapping(mapping, folio, false, NULL)) { /* - * Unfreezing the refcount with 1 rather than 2 effectively + * Unfreezing the refcount with 1 effectively * drops the pagecache ref for us without requiring another * atomic operation. */ folio_ref_unfreeze(folio, 1); - return 1; + return folio_nr_pages(folio); } return 0; } -- cgit v1.2.3-71-gd317 From d6c75dc22c755c567838f12f12a16f2a323ebd4e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 13 Feb 2022 15:22:28 -0500 Subject: mm/truncate: Split invalidate_inode_page() into mapping_evict_folio() Some of the callers already have the address_space and can avoid calling folio_mapping() and checking if the folio was already truncated. Also add kernel-doc and fix the return type (in case we ever support folios larger than 4TB). Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Miaohe Lin --- include/linux/mm.h | 1 - mm/internal.h | 1 + mm/memory-failure.c | 4 ++-- mm/truncate.c | 34 +++++++++++++++++++++++----------- 4 files changed, 26 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index a583b7375445..dede2eda4d7f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1825,7 +1825,6 @@ extern void truncate_setsize(struct inode *inode, loff_t newsize); void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); int generic_error_remove_page(struct address_space *mapping, struct page *page); -int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, diff --git a/mm/internal.h b/mm/internal.h index ade30a1e6682..9c1959fff477 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -95,6 +95,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio); int truncate_inode_folio(struct address_space *mapping, struct folio *folio); bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end); +long invalidate_inode_page(struct page *page); /** * folio_evictable - Test whether a folio is evictable. 
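To make the split concrete (an illustrative sketch only; evict_via_mapping() is a hypothetical name, and both helpers are local to mm/truncate.c), a caller that already holds the address_space can skip the folio_mapping() lookup and truncation check that invalidate_inode_page() performs:

/*
 * Sketch: two entry points to the same eviction core.  Assumes the
 * folio is locked, as invalidate_inode_page() requires.
 */
static long evict_via_page(struct page *page)
{
	/* Pays for folio_mapping() and the truncation check. */
	return invalidate_inode_page(page);
}

static long evict_via_mapping(struct address_space *mapping,
			      struct folio *folio)
{
	/* Mapping already known: go straight to the core helper. */
	return mapping_evict_folio(mapping, folio);
}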
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 97a9ed8f87a9..0b72a936b8dd 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -2139,7 +2139,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist) */ static int __soft_offline_page(struct page *page) { - int ret = 0; + long ret = 0; unsigned long pfn = page_to_pfn(page); struct page *hpage = compound_head(page); char const *msg_page[] = {"page", "hugepage"}; @@ -2196,7 +2196,7 @@ static int __soft_offline_page(struct page *page) if (!list_empty(&pagelist)) putback_movable_pages(&pagelist); - pr_info("soft offline: %#lx: %s migration failed %d, type %pGp\n", + pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n", pfn, msg_page[huge], ret, &page->flags); if (ret > 0) ret = -EBUSY; diff --git a/mm/truncate.c b/mm/truncate.c index 1d97c4cae6a0..2fb10735aab4 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -273,18 +273,9 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page) } EXPORT_SYMBOL(generic_error_remove_page); -/* - * Safely invalidate one page from its pagecache mapping. - * It only drops clean, unused pages. The page must be locked. - * - * Returns 1 if the page is successfully invalidated, otherwise 0. - */ -int invalidate_inode_page(struct page *page) +static long mapping_evict_folio(struct address_space *mapping, + struct folio *folio) { - struct folio *folio = page_folio(page); - struct address_space *mapping = folio_mapping(folio); - if (!mapping) - return 0; if (folio_test_dirty(folio) || folio_test_writeback(folio)) return 0; /* The refcount will be elevated if any page in the folio is mapped */ @@ -297,6 +288,27 @@ int invalidate_inode_page(struct page *page) return remove_mapping(mapping, folio); } +/** + * invalidate_inode_page() - Remove an unused page from the pagecache. + * @page: The page to remove. + * + * Safely invalidate one page from its pagecache mapping. + * It only drops clean, unused pages. + * + * Context: Page must be locked. + * Return: The number of pages successfully removed. + */ +long invalidate_inode_page(struct page *page) +{ + struct folio *folio = page_folio(page); + struct address_space *mapping = folio_mapping(folio); + + /* The page may have been truncated before it was locked */ + if (!mapping) + return 0; + return mapping_evict_folio(mapping, folio); +} + /** * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets * @mapping: mapping to truncate -- cgit v1.2.3-71-gd317 From 261b6840ed10419ac2f554e515592d59dd5c82cf Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 13 Feb 2022 16:40:24 -0500 Subject: mm: Turn deactivate_file_page() into deactivate_file_folio() This function has one caller which already has a reference to the page, so we don't need to use get_page_unless_zero(). Also move the prototype to mm/internal.h. 
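A hedged aside on the refcount reasoning (sketch only, helper names invented): get_page_unless_zero() exists for speculative references, where the page might hit refcount zero concurrently; once the sole caller is known to hold its own reference, an unconditional folio_get() suffices:

#include <linux/mm.h>

/*
 * Speculative: the page may be freed under us, so taking a
 * reference can fail and must be checked.
 */
static bool try_take_ref(struct page *page)
{
	return get_page_unless_zero(page);
}

/*
 * Guaranteed: the caller already holds a reference, so the folio
 * cannot go away and the bump cannot fail.  This is the pattern
 * deactivate_file_folio() can now rely on.
 */
static void take_ref(struct folio *folio)
{
	folio_get(folio);
}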
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Miaohe Lin --- include/linux/swap.h | 1 - mm/internal.h | 1 + mm/swap.c | 35 ++++++++++++++++++----------------- mm/truncate.c | 2 +- 4 files changed, 20 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 304f174b4d31..064e60e9f63f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -372,7 +372,6 @@ extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_cpu_zone(struct zone *zone); extern void lru_add_drain_all(void); -extern void deactivate_file_page(struct page *page); extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); diff --git a/mm/internal.h b/mm/internal.h index 9c1959fff477..7c441f43ba31 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -66,6 +66,7 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat) vm_fault_t do_swap_page(struct vm_fault *vmf); void folio_rotate_reclaimable(struct folio *folio); bool __folio_end_writeback(struct folio *folio); +void deactivate_file_folio(struct folio *folio); void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); diff --git a/mm/swap.c b/mm/swap.c index fc3b7989f5b2..65ec5cbab78b 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -630,32 +630,33 @@ void lru_add_drain_cpu(int cpu) } /** - * deactivate_file_page - forcefully deactivate a file page - * @page: page to deactivate + * deactivate_file_folio() - Forcefully deactivate a file folio. + * @folio: Folio to deactivate. * - * This function hints the VM that @page is a good reclaim candidate, - * for example if its invalidation fails due to the page being dirty + * This function hints to the VM that @folio is a good reclaim candidate, + * for example if its invalidation fails due to the folio being dirty * or under writeback. + * + * Context: Caller holds a reference on the page. */ -void deactivate_file_page(struct page *page) +void deactivate_file_folio(struct folio *folio) { + struct pagevec *pvec; + /* - * In a workload with many unevictable page such as mprotect, - * unevictable page deactivation for accelerating reclaim is pointless. + * In a workload with many unevictable pages such as mprotect, + * unevictable folio deactivation for accelerating reclaim is pointless. */ - if (PageUnevictable(page)) + if (folio_test_unevictable(folio)) return; - if (likely(get_page_unless_zero(page))) { - struct pagevec *pvec; - - local_lock(&lru_pvecs.lock); - pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file); + folio_get(folio); + local_lock(&lru_pvecs.lock); + pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file); - if (pagevec_add_and_need_flush(pvec, page)) - pagevec_lru_move_fn(pvec, lru_deactivate_file_fn); - local_unlock(&lru_pvecs.lock); - } + if (pagevec_add_and_need_flush(pvec, &folio->page)) + pagevec_lru_move_fn(pvec, lru_deactivate_file_fn); + local_unlock(&lru_pvecs.lock); } /* diff --git a/mm/truncate.c b/mm/truncate.c index a8b0243eadf6..9ed62cb3c503 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -527,7 +527,7 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping, * of interest and try to speed up its reclaim. 
*/ if (!ret) { - deactivate_file_page(&folio->page); + deactivate_file_folio(folio); /* It is likely on the pagevec of a remote CPU */ if (nr_pagevec) (*nr_pagevec)++; -- cgit v1.2.3-71-gd317 From c56109dd35c9204cd6c49d2116ef36e5044ef867 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 13 Feb 2022 17:22:10 -0500 Subject: mm/truncate: Combine invalidate_mapping_pagevec() and __invalidate_mapping_pages() We can save a function call by combining these two functions, which are identical except for the return value. Also move the prototype to mm/internal.h. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Miaohe Lin --- include/linux/fs.h | 4 ---- mm/internal.h | 2 ++ mm/truncate.c | 32 +++++++++++++------------------- 3 files changed, 15 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index e2d892b201b0..85c584c5c623 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2749,10 +2749,6 @@ extern bool is_bad_inode(struct inode *); unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); -void invalidate_mapping_pagevec(struct address_space *mapping, - pgoff_t start, pgoff_t end, - unsigned long *nr_pagevec); - static inline void invalidate_remote_inode(struct inode *inode) { if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || diff --git a/mm/internal.h b/mm/internal.h index 7c441f43ba31..6047268076e7 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -97,6 +97,8 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio); bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end); long invalidate_inode_page(struct page *page); +unsigned long invalidate_mapping_pagevec(struct address_space *mapping, + pgoff_t start, pgoff_t end, unsigned long *nr_pagevec); /** * folio_evictable - Test whether a folio is evictable. diff --git a/mm/truncate.c b/mm/truncate.c index 9ed62cb3c503..cace6e3e4e8c 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -494,7 +494,18 @@ void truncate_inode_pages_final(struct address_space *mapping) } EXPORT_SYMBOL(truncate_inode_pages_final); -static unsigned long __invalidate_mapping_pages(struct address_space *mapping, +/** + * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode + * @mapping: the address_space which holds the pages to invalidate + * @start: the offset 'from' which to invalidate + * @end: the offset 'to' which to invalidate (inclusive) + * @nr_pagevec: invalidate failed page number for caller + * + * This helper is similar to invalidate_mapping_pages(), except that it accounts + * for pages that are likely on a pagevec and counts them in @nr_pagevec, which + * will be used by the caller. 
+ */ +unsigned long invalidate_mapping_pagevec(struct address_space *mapping, pgoff_t start, pgoff_t end, unsigned long *nr_pagevec) { pgoff_t indices[PAGEVEC_SIZE]; @@ -559,27 +570,10 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping, unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end) { - return __invalidate_mapping_pages(mapping, start, end, NULL); + return invalidate_mapping_pagevec(mapping, start, end, NULL); } EXPORT_SYMBOL(invalidate_mapping_pages); -/** - * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode - * @mapping: the address_space which holds the pages to invalidate - * @start: the offset 'from' which to invalidate - * @end: the offset 'to' which to invalidate (inclusive) - * @nr_pagevec: invalidate failed page number for caller - * - * This helper is similar to invalidate_mapping_pages(), except that it accounts - * for pages that are likely on a pagevec and counts them in @nr_pagevec, which - * will be used by the caller. - */ -void invalidate_mapping_pagevec(struct address_space *mapping, - pgoff_t start, pgoff_t end, unsigned long *nr_pagevec) -{ - __invalidate_mapping_pages(mapping, start, end, nr_pagevec); -} - /* * This is like invalidate_inode_page(), except it ignores the page's * refcount. We do this because invalidate_inode_pages2() needs stronger -- cgit v1.2.3-71-gd317 From cbcc268bb1ce5b539e7652d398e08e9b83dc4cef Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 13 Feb 2022 17:23:58 -0500 Subject: fs: Move many prototypes to pagemap.h These functions are page cache functionality and don't need to be declared in fs.h. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Miaohe Lin --- drivers/block/xen-blkback/xenbus.c | 1 + drivers/usb/gadget/function/f_mass_storage.c | 1 + fs/coda/file.c | 1 + fs/iomap/fiemap.c | 1 + fs/nfsd/filecache.c | 1 + fs/nfsd/vfs.c | 1 + fs/vboxsf/utils.c | 1 + include/linux/fs.h | 116 --------------------------- include/linux/pagemap.h | 114 ++++++++++++++++++++++++++ 9 files changed, 121 insertions(+), 116 deletions(-) (limited to 'include/linux') diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 62125fd4af4a..f09040435e2e 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include "common.h" diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 46dd11dcb3a8..7371c6e65b10 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -179,6 +179,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/coda/file.c b/fs/coda/file.c index 29dd87be2fb8..3f3c81e6b1ab 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c index 66cf267c68ae..610ca6f1ec9b 100644 --- a/fs/iomap/fiemap.c +++ b/fs/iomap/fiemap.c @@ -7,6 +7,7 @@ #include #include #include +#include static int iomap_to_fiemap(struct fiemap_extent_info *fi, const struct iomap *iomap, u32 flags) diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index 8bc807c5fea4..47f804e0ec93 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git 
a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 91600e71be19..fe0d7abbc1b1 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c index aec2ebf7d25a..e1db0f3f7e5e 100644 --- a/fs/vboxsf/utils.c +++ b/fs/vboxsf/utils.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "vfsmod.h" diff --git a/include/linux/fs.h b/include/linux/fs.h index 85c584c5c623..0961c979e949 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2746,50 +2746,6 @@ extern void init_special_inode(struct inode *, umode_t, dev_t); extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); -unsigned long invalidate_mapping_pages(struct address_space *mapping, - pgoff_t start, pgoff_t end); - -static inline void invalidate_remote_inode(struct inode *inode) -{ - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode)) - invalidate_mapping_pages(inode->i_mapping, 0, -1); -} -extern int invalidate_inode_pages2(struct address_space *mapping); -extern int invalidate_inode_pages2_range(struct address_space *mapping, - pgoff_t start, pgoff_t end); -extern int write_inode_now(struct inode *, int); -extern int filemap_fdatawrite(struct address_space *); -extern int filemap_flush(struct address_space *); -extern int filemap_fdatawait_keep_errors(struct address_space *mapping); -extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, - loff_t lend); -extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping, - loff_t start_byte, loff_t end_byte); - -static inline int filemap_fdatawait(struct address_space *mapping) -{ - return filemap_fdatawait_range(mapping, 0, LLONG_MAX); -} - -extern bool filemap_range_has_page(struct address_space *, loff_t lstart, - loff_t lend); -extern int filemap_write_and_wait_range(struct address_space *mapping, - loff_t lstart, loff_t lend); -extern int __filemap_fdatawrite_range(struct address_space *mapping, - loff_t start, loff_t end, int sync_mode); -extern int filemap_fdatawrite_range(struct address_space *mapping, - loff_t start, loff_t end); -extern int filemap_check_errors(struct address_space *mapping); -extern void __filemap_set_wb_err(struct address_space *mapping, int err); -int filemap_fdatawrite_wbc(struct address_space *mapping, - struct writeback_control *wbc); - -static inline int filemap_write_and_wait(struct address_space *mapping) -{ - return filemap_write_and_wait_range(mapping, 0, LLONG_MAX); -} - extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart, loff_t lend); extern int __must_check file_check_and_advance_wb_err(struct file *file); @@ -2801,67 +2757,6 @@ static inline int file_write_and_wait(struct file *file) return file_write_and_wait_range(file, 0, LLONG_MAX); } -/** - * filemap_set_wb_err - set a writeback error on an address_space - * @mapping: mapping in which to set writeback error - * @err: error to be set in mapping - * - * When writeback fails in some way, we must record that error so that - * userspace can be informed when fsync and the like are called. We endeavor - * to report errors on any file that was open at the time of the error. Some - * internal callers also need to know when writeback errors have occurred. 
- * - * When a writeback error occurs, most filesystems will want to call - * filemap_set_wb_err to record the error in the mapping so that it will be - * automatically reported whenever fsync is called on the file. - */ -static inline void filemap_set_wb_err(struct address_space *mapping, int err) -{ - /* Fastpath for common case of no error */ - if (unlikely(err)) - __filemap_set_wb_err(mapping, err); -} - -/** - * filemap_check_wb_err - has an error occurred since the mark was sampled? - * @mapping: mapping to check for writeback errors - * @since: previously-sampled errseq_t - * - * Grab the errseq_t value from the mapping, and see if it has changed "since" - * the given value was sampled. - * - * If it has then report the latest error set, otherwise return 0. - */ -static inline int filemap_check_wb_err(struct address_space *mapping, - errseq_t since) -{ - return errseq_check(&mapping->wb_err, since); -} - -/** - * filemap_sample_wb_err - sample the current errseq_t to test for later errors - * @mapping: mapping to be sampled - * - * Writeback errors are always reported relative to a particular sample point - * in the past. This function provides those sample points. - */ -static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) -{ - return errseq_sample(&mapping->wb_err); -} - -/** - * file_sample_sb_err - sample the current errseq_t to test for later errors - * @file: file pointer to be sampled - * - * Grab the most current superblock-level errseq_t value for the given - * struct file. - */ -static inline errseq_t file_sample_sb_err(struct file *file) -{ - return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); -} - extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, int datasync); @@ -3604,15 +3499,4 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice); -/* - * Flush file data before changing attributes. Caller must hold any locks - * required to prevent further writes to this file until we're done setting - * flags. 
- */ -static inline int inode_drain_writes(struct inode *inode) -{ - inode_dio_wait(inode); - return filemap_write_and_wait(inode->i_mapping); -} - #endif /* _LINUX_FS_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index cdb3f118603a..f968b36ad771 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -18,6 +18,120 @@ struct folio_batch; +unsigned long invalidate_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t end); + +static inline void invalidate_remote_inode(struct inode *inode) +{ + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + invalidate_mapping_pages(inode->i_mapping, 0, -1); +} +int invalidate_inode_pages2(struct address_space *mapping); +int invalidate_inode_pages2_range(struct address_space *mapping, + pgoff_t start, pgoff_t end); +int write_inode_now(struct inode *, int sync); +int filemap_fdatawrite(struct address_space *); +int filemap_flush(struct address_space *); +int filemap_fdatawait_keep_errors(struct address_space *mapping); +int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); +int filemap_fdatawait_range_keep_errors(struct address_space *mapping, + loff_t start_byte, loff_t end_byte); + +static inline int filemap_fdatawait(struct address_space *mapping) +{ + return filemap_fdatawait_range(mapping, 0, LLONG_MAX); +} + +bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend); +int filemap_write_and_wait_range(struct address_space *mapping, + loff_t lstart, loff_t lend); +int __filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end, int sync_mode); +int filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end); +int filemap_check_errors(struct address_space *mapping); +void __filemap_set_wb_err(struct address_space *mapping, int err); +int filemap_fdatawrite_wbc(struct address_space *mapping, + struct writeback_control *wbc); + +static inline int filemap_write_and_wait(struct address_space *mapping) +{ + return filemap_write_and_wait_range(mapping, 0, LLONG_MAX); +} + +/** + * filemap_set_wb_err - set a writeback error on an address_space + * @mapping: mapping in which to set writeback error + * @err: error to be set in mapping + * + * When writeback fails in some way, we must record that error so that + * userspace can be informed when fsync and the like are called. We endeavor + * to report errors on any file that was open at the time of the error. Some + * internal callers also need to know when writeback errors have occurred. + * + * When a writeback error occurs, most filesystems will want to call + * filemap_set_wb_err to record the error in the mapping so that it will be + * automatically reported whenever fsync is called on the file. + */ +static inline void filemap_set_wb_err(struct address_space *mapping, int err) +{ + /* Fastpath for common case of no error */ + if (unlikely(err)) + __filemap_set_wb_err(mapping, err); +} + +/** + * filemap_check_wb_err - has an error occurred since the mark was sampled? + * @mapping: mapping to check for writeback errors + * @since: previously-sampled errseq_t + * + * Grab the errseq_t value from the mapping, and see if it has changed "since" + * the given value was sampled. + * + * If it has then report the latest error set, otherwise return 0. 
+ */ +static inline int filemap_check_wb_err(struct address_space *mapping, + errseq_t since) +{ + return errseq_check(&mapping->wb_err, since); +} + +/** + * filemap_sample_wb_err - sample the current errseq_t to test for later errors + * @mapping: mapping to be sampled + * + * Writeback errors are always reported relative to a particular sample point + * in the past. This function provides those sample points. + */ +static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) +{ + return errseq_sample(&mapping->wb_err); +} + +/** + * file_sample_sb_err - sample the current errseq_t to test for later errors + * @file: file pointer to be sampled + * + * Grab the most current superblock-level errseq_t value for the given + * struct file. + */ +static inline errseq_t file_sample_sb_err(struct file *file) +{ + return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); +} + +/* + * Flush file data before changing attributes. Caller must hold any locks + * required to prevent further writes to this file until we're done setting + * flags. + */ +static inline int inode_drain_writes(struct inode *inode) +{ + inode_dio_wait(inode); + return filemap_write_and_wait(inode->i_mapping); +} + static inline bool mapping_empty(struct address_space *mapping) { return xa_empty(&mapping->i_pages); -- cgit v1.2.3-71-gd317 From 74e8ee4708a8edabbbc7ab8c12ec24d7a561bb41 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 18 Jan 2022 10:50:48 -0500 Subject: mm: Turn head_compound_mapcount() into folio_entire_mapcount() Adjust documentation to be more clear. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- include/linux/mm.h | 17 +++++++++++------ mm/debug.c | 6 ++++-- 2 files changed, 15 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index dede2eda4d7f..70f0ca217962 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -776,21 +776,26 @@ static inline int is_vmalloc_or_module_addr(const void *x) } #endif -static inline int head_compound_mapcount(struct page *head) +/* + * How many times the entire folio is mapped as a single unit (eg by a + * PMD or PUD entry). This is probably not what you want, except for + * debugging purposes; look at folio_mapcount() or page_mapcount() + * instead. + */ +static inline int folio_entire_mapcount(struct folio *folio) { - return atomic_read(compound_mapcount_ptr(head)) + 1; + VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); + return atomic_read(folio_mapcount_ptr(folio)) + 1; } /* * Mapcount of compound page as a whole, does not include mapped sub-pages. * - * Must be called only for compound pages or any their tail sub-pages. + * Must be called only for compound pages. 
*/ static inline int compound_mapcount(struct page *page) { - VM_BUG_ON_PAGE(!PageCompound(page), page); - page = compound_head(page); - return head_compound_mapcount(page); + return folio_entire_mapcount(page_folio(page)); } /* diff --git a/mm/debug.c b/mm/debug.c index c4cf44266430..eeb7ea3ca292 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -48,7 +48,8 @@ const struct trace_print_flags vmaflag_names[] = { static void __dump_page(struct page *page) { - struct page *head = compound_head(page); + struct folio *folio = page_folio(page); + struct page *head = &folio->page; struct address_space *mapping; bool compound = PageCompound(page); /* @@ -76,6 +77,7 @@ static void __dump_page(struct page *page) else mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS); head = page; + folio = (struct folio *)page; compound = false; } else { mapping = page_mapping(page); @@ -94,7 +96,7 @@ static void __dump_page(struct page *page) if (compound) { pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n", head, compound_order(head), - head_compound_mapcount(head), + folio_entire_mapcount(folio), head_compound_pincount(head)); } -- cgit v1.2.3-71-gd317 From 4ba1119cd53166d853050ff1a9d76079cd8f8e06 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 17 Jan 2022 16:33:26 -0500 Subject: mm: Add folio_mapcount() This implements the same algorithm as total_mapcount(), which is transformed into a wrapper function. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- include/linux/mm.h | 8 +++++++- mm/huge_memory.c | 24 ------------------------ mm/util.c | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 70f0ca217962..0d380dc26847 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -825,8 +825,14 @@ static inline int page_mapcount(struct page *page) return atomic_read(&page->_mapcount) + 1; } +int folio_mapcount(struct folio *folio); + #ifdef CONFIG_TRANSPARENT_HUGEPAGE -int total_mapcount(struct page *page); +static inline int total_mapcount(struct page *page) +{ + return folio_mapcount(page_folio(page)); +} + int page_trans_huge_mapcount(struct page *page); #else static inline int total_mapcount(struct page *page) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9afca0122723..beebe4105659 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2465,30 +2465,6 @@ static void __split_huge_page(struct page *page, struct list_head *list, } } -int total_mapcount(struct page *page) -{ - int i, compound, nr, ret; - - VM_BUG_ON_PAGE(PageTail(page), page); - - if (likely(!PageCompound(page))) - return atomic_read(&page->_mapcount) + 1; - - compound = compound_mapcount(page); - nr = compound_nr(page); - if (PageHuge(page)) - return compound; - ret = compound; - for (i = 0; i < nr; i++) - ret += atomic_read(&page[i]._mapcount) + 1; - /* File pages has compound_mapcount included in _mapcount */ - if (!PageAnon(page)) - return ret - compound * nr; - if (PageDoubleMap(page)) - ret -= nr; - return ret; -} - /* * This calculates accurately how many mappings a transparent hugepage * has (unlike page_mapcount() which isn't fully accurate). This full diff --git a/mm/util.c b/mm/util.c index 7e43369064c8..b614f423aaa4 100644 --- a/mm/util.c +++ b/mm/util.c @@ -740,6 +740,39 @@ int __page_mapcount(struct page *page) } EXPORT_SYMBOL_GPL(__page_mapcount); +/** + * folio_mapcount() - Calculate the number of mappings of this folio. + * @folio: The folio. 
+ * + * A large folio tracks both how many times the entire folio is mapped, + * and how many times each individual page in the folio is mapped. + * This function calculates the total number of times the folio is + * mapped. + * + * Return: The number of times this folio is mapped. + */ +int folio_mapcount(struct folio *folio) +{ + int i, compound, nr, ret; + + if (likely(!folio_test_large(folio))) + return atomic_read(&folio->_mapcount) + 1; + + compound = folio_entire_mapcount(folio); + nr = folio_nr_pages(folio); + if (folio_test_hugetlb(folio)) + return compound; + ret = compound; + for (i = 0; i < nr; i++) + ret += atomic_read(&folio_page(folio, i)->_mapcount) + 1; + /* File pages has compound_mapcount included in _mapcount */ + if (!folio_test_anon(folio)) + return ret - compound * nr; + if (folio_test_double_map(folio)) + ret -= nr; + return ret; +} + /** * folio_copy - Copy the contents of one folio to another. * @dst: Folio to copy to. -- cgit v1.2.3-71-gd317 From 346cf61311f6e348337e95aa2febe29e86137f42 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 18 Jan 2022 08:56:47 -0500 Subject: mm: Add split_folio_to_list() This is a convenience function; split_huge_page_to_list() can take any page in a folio (and does so on purpose because that page will be the one which keeps the refcount). But it's convenient for the callers to pass the folio instead of the first page in the folio. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- include/linux/huge_mm.h | 6 ++++++ mm/vmscan.c | 10 +++++----- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index e4c18ba8d3bf..71c073d411ac 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -483,6 +483,12 @@ static inline bool thp_migration_supported(void) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline int split_folio_to_list(struct folio *folio, + struct list_head *list) +{ + return split_huge_page_to_list(&folio->page, list); +} + /** * thp_size - Size of a transparent huge page. * @page: Head page of a transparent huge page. diff --git a/mm/vmscan.c b/mm/vmscan.c index 815fe89d37f9..94729d2d1125 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1715,16 +1715,16 @@ retry: * tail pages can be freed without IO. */ if (!compound_mapcount(page) && - split_huge_page_to_list(page, - page_list)) + split_folio_to_list(folio, + page_list)) goto activate_locked; } if (!add_to_swap(page)) { if (!PageTransHuge(page)) goto activate_locked_split; /* Fallback to swap normal pages */ - if (split_huge_page_to_list(page, - page_list)) + if (split_folio_to_list(folio, + page_list)) goto activate_locked; #ifdef CONFIG_TRANSPARENT_HUGEPAGE count_vm_event(THP_SWPOUT_FALLBACK); @@ -1740,7 +1740,7 @@ retry: } } else if (unlikely(PageTransHuge(page))) { /* Split file THP */ - if (split_huge_page_to_list(page, page_list)) + if (split_folio_to_list(folio, page_list)) goto keep_locked; } -- cgit v1.2.3-71-gd317 From f087b903fc2e4975bff9742a66ee7a837a2f545b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 2 Feb 2022 23:29:45 -0500 Subject: mm: Add folio_pgoff() This is the folio equivalent of page_to_pgoff(). 
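For a sense of how the PAGE_SIZE-based offset gets used (an illustrative sketch, not from this patch; the helper below is hypothetical), mapping a folio back to its user virtual address in a VMA is a matter of shifting the pgoff delta, much as vma_address() does in mm/internal.h:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch: where would @folio map inside @vma?  Mirrors vma_address();
 * the bounds checking a real caller needs is omitted.
 */
static unsigned long folio_addr_in_vma(struct folio *folio,
				       struct vm_area_struct *vma)
{
	pgoff_t pgoff = folio_pgoff(folio);

	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}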
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- include/linux/pagemap.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index f968b36ad771..a73c928e1d74 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -817,6 +817,17 @@ static inline loff_t folio_file_pos(struct folio *folio) return page_file_offset(&folio->page); } +/* + * Get the offset in PAGE_SIZE (even for hugetlb folios). + * (TODO: hugetlb folios should have ->index in PAGE_SIZE) + */ +static inline pgoff_t folio_pgoff(struct folio *folio) +{ + if (unlikely(folio_test_hugetlb(folio))) + return hugetlb_basepage_index(&folio->page); + return folio->index; +} + extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, unsigned long address); -- cgit v1.2.3-71-gd317 From eed05e54d275b3cfc5d8c79843c5276a5878e94a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 3 Feb 2022 09:06:08 -0500 Subject: mm: Add DEFINE_PAGE_VMA_WALK and DEFINE_FOLIO_VMA_WALK Instead of declaring a struct page_vma_mapped_walk directly, use these helpers to allow us to transition to a PFN approach in the following patches. Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/rmap.h | 16 ++++++++++++++++ kernel/events/uprobes.c | 6 +----- mm/damon/paddr.c | 12 ++---------- mm/ksm.c | 5 +---- mm/migrate.c | 7 +------ mm/page_idle.c | 6 +----- mm/rmap.c | 31 +++++-------------------------- 7 files changed, 27 insertions(+), 56 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index ac29b076082b..0d894a2bfaa1 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -214,6 +214,22 @@ struct page_vma_mapped_walk { unsigned int flags; }; +#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \ + struct page_vma_mapped_walk name = { \ + .page = _page, \ + .vma = _vma, \ + .address = _address, \ + .flags = _flags, \ + } + +#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \ + struct page_vma_mapped_walk name = { \ + .page = &_folio->page, \ + .vma = _vma, \ + .address = _address, \ + .flags = _flags, \ + } + static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { /* HugeTLB pte is set to the relevant page table entry without pte_mapped. 
*/ diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index eed2f7437d96..6418083901d4 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -155,11 +155,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, struct page *old_page, struct page *new_page) { struct mm_struct *mm = vma->vm_mm; - struct page_vma_mapped_walk pvmw = { - .page = compound_head(old_page), - .vma = vma, - .address = addr, - }; + DEFINE_FOLIO_VMA_WALK(pvmw, page_folio(old_page), vma, addr, 0); int err; struct mmu_notifier_range range; diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 5e8244f65a1a..cb45d49c731d 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -19,11 +19,7 @@ static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *arg) { - struct page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - .address = addr, - }; + DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0); while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; @@ -93,11 +89,7 @@ static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *arg) { struct damon_pa_access_chk_result *result = arg; - struct page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - .address = addr, - }; + DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0); result->accessed = false; result->page_sz = PAGE_SIZE; diff --git a/mm/ksm.c b/mm/ksm.c index c5a4403b5dc9..ea82fef93a31 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1034,10 +1034,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, pte_t *orig_pte) { struct mm_struct *mm = vma->vm_mm; - struct page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - }; + DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0); int swapped; int err = -EFAULT; struct mmu_notifier_range range; diff --git a/mm/migrate.c b/mm/migrate.c index f4076093c855..71f92e8ed934 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -174,12 +174,7 @@ void putback_movable_pages(struct list_head *l) static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *old) { - struct page_vma_mapped_walk pvmw = { - .page = old, - .vma = vma, - .address = addr, - .flags = PVMW_SYNC | PVMW_MIGRATION, - }; + DEFINE_PAGE_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); struct page *new; pte_t pte; swp_entry_t entry; diff --git a/mm/page_idle.c b/mm/page_idle.c index edead6a8a5f9..3e05bf1ce825 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -48,11 +48,7 @@ static bool page_idle_clear_pte_refs_one(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *arg) { - struct page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - .address = addr, - }; + DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0); bool referenced = false; while (page_vma_mapped_walk(&pvmw)) { diff --git a/mm/rmap.c b/mm/rmap.c index 1a13d5d6cfc7..a7f06b76b503 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -802,11 +802,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { struct page_referenced_arg *pra = arg; - struct page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - .address = address, - }; + DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); int referenced = 0; while (page_vma_mapped_walk(&pvmw)) { @@ -934,12 +930,7 @@ int page_referenced(struct page *page, static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { - struct 
page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - .address = address, - .flags = PVMW_SYNC, - }; + DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, PVMW_SYNC); struct mmu_notifier_range range; int *cleaned = arg; @@ -1419,11 +1410,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; - struct page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - .address = address, - }; + DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); pte_t pteval; struct page *subpage; bool ret = true; @@ -1714,11 +1701,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; - struct page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - .address = address, - }; + DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); pte_t pteval; struct page *subpage; bool ret = true; @@ -2001,11 +1984,7 @@ static bool page_make_device_exclusive_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *priv) { struct mm_struct *mm = vma->vm_mm; - struct page_vma_mapped_walk pvmw = { - .page = page, - .vma = vma, - .address = address, - }; + DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); struct make_exclusive_args *args = priv; pte_t pteval; struct page *subpage; -- cgit v1.2.3-71-gd317 From 2aff7a4755bed2870ee23b75bc88cdc8d76cdd03 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 3 Feb 2022 11:40:17 -0500 Subject: mm: Convert page_vma_mapped_walk to work on PFNs page_mapped_in_vma() really just wants to walk one page, but as the code stands, if passed the head page of a compound page, it will walk every page in the compound page. Extract pfn/nr_pages/pgoff from the struct page early, so they can be overridden by page_mapped_in_vma(). 
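One way to see what the PFN conversion buys (a sketch under stated assumptions, not the patch itself): with a {pfn, nr_pages} pair carried in the walk, "does this mapping intersect the target?" reduces to a range-overlap test. The generalised helper below mirrors the check_pmd() added further down, using inclusive end points so a range touching the top of the PFN space does not overflow:

/*
 * Sketch: does [map_pfn, map_pfn + map_nr) intersect
 * [walk_pfn, walk_pfn + walk_nr)?  Comparisons use the inclusive
 * last PFN of each range, as check_pmd() does.
 */
static bool pfn_ranges_overlap(unsigned long map_pfn, unsigned long map_nr,
			       unsigned long walk_pfn, unsigned long walk_nr)
{
	if (map_pfn + map_nr - 1 < walk_pfn)
		return false;		/* mapping ends below the target */
	if (map_pfn > walk_pfn + walk_nr - 1)
		return false;		/* mapping starts above the target */
	return true;
}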
Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/hugetlb.h | 5 +++++ include/linux/rmap.h | 17 ++++++++++----- mm/internal.h | 15 ++++++++----- mm/migrate.c | 5 +++-- mm/page_vma_mapped.c | 58 ++++++++++++++++++++++--------------------------- mm/rmap.c | 8 +++---- 6 files changed, 58 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d1897a69c540..6ba2f8e74fbb 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -970,6 +970,11 @@ static inline struct hstate *page_hstate(struct page *page) return NULL; } +static inline struct hstate *size_to_hstate(unsigned long size) +{ + return NULL; +} + static inline unsigned long huge_page_size(struct hstate *h) { return PAGE_SIZE; diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 0d894a2bfaa1..0c838ba1a8ee 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -11,6 +11,7 @@ #include #include #include +#include /* * The anon_vma heads a list of private "related" vmas, to scan if @@ -201,11 +202,13 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, /* Avoid racy checks */ #define PVMW_SYNC (1 << 0) -/* Look for migarion entries rather than present PTEs */ +/* Look for migration entries rather than present PTEs */ #define PVMW_MIGRATION (1 << 1) struct page_vma_mapped_walk { - struct page *page; + unsigned long pfn; + unsigned long nr_pages; + pgoff_t pgoff; struct vm_area_struct *vma; unsigned long address; pmd_t *pmd; @@ -216,7 +219,9 @@ struct page_vma_mapped_walk { #define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \ struct page_vma_mapped_walk name = { \ - .page = _page, \ + .pfn = page_to_pfn(_page), \ + .nr_pages = compound_nr(page), \ + .pgoff = page_to_pgoff(page), \ .vma = _vma, \ .address = _address, \ .flags = _flags, \ @@ -224,7 +229,9 @@ struct page_vma_mapped_walk { #define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \ struct page_vma_mapped_walk name = { \ - .page = &_folio->page, \ + .pfn = folio_pfn(_folio), \ + .nr_pages = folio_nr_pages(_folio), \ + .pgoff = folio_pgoff(_folio), \ .vma = _vma, \ .address = _address, \ .flags = _flags, \ @@ -233,7 +240,7 @@ struct page_vma_mapped_walk { static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */ - if (pvmw->pte && !PageHuge(pvmw->page)) + if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma)) pte_unmap(pvmw->pte); if (pvmw->ptl) spin_unlock(pvmw->ptl); diff --git a/mm/internal.h b/mm/internal.h index 6047268076e7..3b652444f070 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -10,6 +10,7 @@ #include #include #include +#include #include struct folio_batch; @@ -475,18 +476,20 @@ vma_address(struct page *page, struct vm_area_struct *vma) } /* - * Then at what user virtual address will none of the page be found in vma? + * Then at what user virtual address will none of the range be found in vma? * Assumes that vma_address() already returned a good starting address. - * If page is a compound head, the entire compound page is considered. 
*/ -static inline unsigned long -vma_address_end(struct page *page, struct vm_area_struct *vma) +static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw) { + struct vm_area_struct *vma = pvmw->vma; pgoff_t pgoff; unsigned long address; - VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */ - pgoff = page_to_pgoff(page) + compound_nr(page); + /* Common case, plus ->pgoff is invalid for KSM */ + if (pvmw->nr_pages == 1) + return pvmw->address + PAGE_SIZE; + + pgoff = pvmw->pgoff + pvmw->nr_pages; address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); /* Check for address beyond vma (or wrapped through 0?) */ if (address < vma->vm_start || address > vma->vm_end) diff --git a/mm/migrate.c b/mm/migrate.c index 71f92e8ed934..358bc311caaa 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -174,7 +174,8 @@ void putback_movable_pages(struct list_head *l) static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *old) { - DEFINE_PAGE_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); + DEFINE_PAGE_VMA_WALK(pvmw, (struct page *)old, vma, addr, + PVMW_SYNC | PVMW_MIGRATION); struct page *new; pte_t pte; swp_entry_t entry; @@ -184,7 +185,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, if (PageKsm(page)) new = page; else - new = page - pvmw.page->index + + new = page - pvmw.pgoff + linear_page_index(vma, pvmw.address); #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index f7b331081791..1187f9c1ec5b 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -53,18 +53,6 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw) return true; } -static inline bool pfn_is_match(struct page *page, unsigned long pfn) -{ - unsigned long page_pfn = page_to_pfn(page); - - /* normal page and hugetlbfs page */ - if (!PageTransCompound(page) || PageHuge(page)) - return page_pfn == pfn; - - /* THP can be referenced by any subpage */ - return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page); -} - /** * check_pte - check if @pvmw->page is mapped at the @pvmw->pte * @pvmw: page_vma_mapped_walk struct, includes a pair pte and page for checking @@ -116,7 +104,17 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw) pfn = pte_pfn(*pvmw->pte); } - return pfn_is_match(pvmw->page, pfn); + return (pfn - pvmw->pfn) < pvmw->nr_pages; +} + +/* Returns true if the two ranges overlap. Careful to not overflow. */ +static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw) +{ + if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn) + return false; + if (pfn > pvmw->pfn + pvmw->nr_pages - 1) + return false; + return true; } static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size) @@ -127,7 +125,7 @@ static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size) } /** - * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at + * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at * @pvmw->address * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags * must be set. pmd, pte and ptl must be NULL. 
@@ -152,8 +150,8 @@ static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size) */ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) { - struct mm_struct *mm = pvmw->vma->vm_mm; - struct page *page = pvmw->page; + struct vm_area_struct *vma = pvmw->vma; + struct mm_struct *mm = vma->vm_mm; unsigned long end; pgd_t *pgd; p4d_t *p4d; @@ -164,32 +162,26 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) if (pvmw->pmd && !pvmw->pte) return not_found(pvmw); - if (unlikely(PageHuge(page))) { + if (unlikely(is_vm_hugetlb_page(vma))) { + unsigned long size = pvmw->nr_pages * PAGE_SIZE; /* The only possible mapping was handled on last iteration */ if (pvmw->pte) return not_found(pvmw); /* when pud is not present, pte will be NULL */ - pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page)); + pvmw->pte = huge_pte_offset(mm, pvmw->address, size); if (!pvmw->pte) return false; - pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte); + pvmw->ptl = huge_pte_lockptr(size_to_hstate(size), mm, + pvmw->pte); spin_lock(pvmw->ptl); if (!check_pte(pvmw)) return not_found(pvmw); return true; } - /* - * Seek to next pte only makes sense for THP. - * But more important than that optimization, is to filter out - * any PageKsm page: whose page->index misleads vma_address() - * and vma_address_end() to disaster. - */ - end = PageTransCompound(page) ? - vma_address_end(page, pvmw->vma) : - pvmw->address + PAGE_SIZE; + end = vma_address_end(pvmw); if (pvmw->pte) goto next_pte; restart: @@ -224,7 +216,7 @@ restart: if (likely(pmd_trans_huge(pmde))) { if (pvmw->flags & PVMW_MIGRATION) return not_found(pvmw); - if (pmd_page(pmde) != page) + if (!check_pmd(pmd_pfn(pmde), pvmw)) return not_found(pvmw); return true; } @@ -236,7 +228,7 @@ restart: return not_found(pvmw); entry = pmd_to_swp_entry(pmde); if (!is_migration_entry(entry) || - pfn_swap_entry_to_page(entry) != page) + !check_pmd(swp_offset(entry), pvmw)) return not_found(pvmw); return true; } @@ -250,7 +242,8 @@ restart: * cleared *pmd but not decremented compound_mapcount(). */ if ((pvmw->flags & PVMW_SYNC) && - PageTransCompound(page)) { + transparent_hugepage_active(vma) && + (pvmw->nr_pages >= HPAGE_PMD_NR)) { spinlock_t *ptl = pmd_lock(mm, pvmw->pmd); spin_unlock(ptl); @@ -307,7 +300,8 @@ next_pte: int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) { struct page_vma_mapped_walk pvmw = { - .page = page, + .pfn = page_to_pfn(page), + .nr_pages = 1, .vma = vma, .flags = PVMW_SYNC, }; diff --git a/mm/rmap.c b/mm/rmap.c index a7f06b76b503..e27ba4172069 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -940,7 +940,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, */ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, vma, vma->vm_mm, address, - vma_address_end(page, vma)); + vma_address_end(&pvmw)); mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { @@ -1437,8 +1437,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * Note that the page can not be free in this function as call of * try_to_unmap() must hold a reference on the page. */ - range.end = PageKsm(page) ? 
- address + PAGE_SIZE : vma_address_end(page, vma); + range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, address, range.end); if (PageHuge(page)) { @@ -1732,8 +1731,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, * Note that the page can not be free in this function as call of * try_to_unmap() must hold a reference on the page. */ - range.end = PageKsm(page) ? - address + PAGE_SIZE : vma_address_end(page, vma); + range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, address, range.end); if (PageHuge(page)) { -- cgit v1.2.3-71-gd317 From b3ac04132c4b9bc5c9c14608424d410e7ca3b400 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 21 Jan 2022 11:27:31 -0500 Subject: mm/rmap: Turn page_referenced() into folio_referenced() Both its callers pass a page which was previously on an LRU list, so were passing a folio by definition. Use the type system to enforce that and remove a few calls to compound_head(). Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- include/linux/rmap.h | 4 +-- mm/page_idle.c | 2 +- mm/rmap.c | 70 ++++++++++++++++++++++++++-------------------------- mm/vmscan.c | 20 +++++++++------ 4 files changed, 50 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 0c838ba1a8ee..69a1664216de 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -190,7 +190,7 @@ static inline void page_dup_rmap(struct page *page, bool compound) /* * Called from mm/vmscan.c to handle paging out */ -int page_referenced(struct page *, int is_locked, +int folio_referenced(struct folio *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); void try_to_migrate(struct page *page, enum ttu_flags flags); @@ -301,7 +301,7 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); #define anon_vma_prepare(vma) (0) #define anon_vma_link(vma) do {} while (0) -static inline int page_referenced(struct page *page, int is_locked, +static inline int folio_referenced(struct folio *folio, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) { diff --git a/mm/page_idle.c b/mm/page_idle.c index 2427d832f5d6..5c73a9b578da 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -77,7 +77,7 @@ static bool page_idle_clear_pte_refs_one(struct page *page, /* * We cleared the referenced bit in a mapping to this page. To * avoid interference with page reclaim, mark it young so that - * page_referenced() will return > 0. + * folio_referenced() will return > 0. 
*/ folio_set_young(folio); } diff --git a/mm/rmap.c b/mm/rmap.c index b1d7f3e7f58c..36eef54d05d8 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -789,29 +789,30 @@ out: return pmd; } -struct page_referenced_arg { +struct folio_referenced_arg { int mapcount; int referenced; unsigned long vm_flags; struct mem_cgroup *memcg; }; /* - * arg: page_referenced_arg will be passed + * arg: folio_referenced_arg will be passed */ -static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, +static bool folio_referenced_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { - struct page_referenced_arg *pra = arg; - DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); + struct folio *folio = page_folio(page); + struct folio_referenced_arg *pra = arg; + DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); int referenced = 0; while (page_vma_mapped_walk(&pvmw)) { address = pvmw.address; if ((vma->vm_flags & VM_LOCKED) && - (!PageTransCompound(page) || !pvmw.pte)) { + (!folio_test_large(folio) || !pvmw.pte)) { /* Restore the mlock which got missed */ - mlock_vma_page(page, vma, !pvmw.pte); + mlock_vma_folio(folio, vma, !pvmw.pte); page_vma_mapped_walk_done(&pvmw); pra->vm_flags |= VM_LOCKED; return false; /* To break the loop */ @@ -823,10 +824,10 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, /* * Don't treat a reference through * a sequentially read mapping as such. - * If the page has been used in another mapping, + * If the folio has been used in another mapping, * we will catch it; if this other mapping is * already gone, the unmap path will have set - * PG_referenced or activated the page. + * the referenced flag or activated the folio. */ if (likely(!(vma->vm_flags & VM_SEQ_READ))) referenced++; @@ -836,7 +837,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, pvmw.pmd)) referenced++; } else { - /* unexpected pmd-mapped page? */ + /* unexpected pmd-mapped folio? */ WARN_ON_ONCE(1); } @@ -844,8 +845,8 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, } if (referenced) - clear_page_idle(page); - if (test_and_clear_page_young(page)) + folio_clear_idle(folio); + if (folio_test_clear_young(folio)) referenced++; if (referenced) { @@ -859,9 +860,9 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, return true; } -static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) +static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) { - struct page_referenced_arg *pra = arg; + struct folio_referenced_arg *pra = arg; struct mem_cgroup *memcg = pra->memcg; if (!mm_match_cgroup(vma->vm_mm, memcg)) @@ -871,27 +872,26 @@ static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) } /** - * page_referenced - test if the page was referenced - * @page: the page to test - * @is_locked: caller holds lock on the page + * folio_referenced() - Test if the folio was referenced. + * @folio: The folio to test. + * @is_locked: Caller holds lock on the folio. * @memcg: target memory cgroup - * @vm_flags: collect encountered vma->vm_flags who actually referenced the page + * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. * - * Quick test_and_clear_referenced for all mappings to a page, - * returns the number of ptes which referenced the page. 
+ * Quick test_and_clear_referenced for all mappings of a folio, + * + * Return: The number of mappings which referenced the folio. */ -int page_referenced(struct page *page, - int is_locked, - struct mem_cgroup *memcg, - unsigned long *vm_flags) +int folio_referenced(struct folio *folio, int is_locked, + struct mem_cgroup *memcg, unsigned long *vm_flags) { int we_locked = 0; - struct page_referenced_arg pra = { - .mapcount = total_mapcount(page), + struct folio_referenced_arg pra = { + .mapcount = folio_mapcount(folio), .memcg = memcg, }; struct rmap_walk_control rwc = { - .rmap_one = page_referenced_one, + .rmap_one = folio_referenced_one, .arg = (void *)&pra, .anon_lock = page_lock_anon_vma_read, }; @@ -900,11 +900,11 @@ int page_referenced(struct page *page, if (!pra.mapcount) return 0; - if (!page_rmapping(page)) + if (!folio_raw_mapping(folio)) return 0; - if (!is_locked && (!PageAnon(page) || PageKsm(page))) { - we_locked = trylock_page(page); + if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { + we_locked = folio_trylock(folio); if (!we_locked) return 1; } @@ -915,14 +915,14 @@ int page_referenced(struct page *page, * cgroups */ if (memcg) { - rwc.invalid_vma = invalid_page_referenced_vma; + rwc.invalid_vma = invalid_folio_referenced_vma; } - rmap_walk(page, &rwc); + rmap_walk(&folio->page, &rwc); *vm_flags = pra.vm_flags; if (we_locked) - unlock_page(page); + folio_unlock(folio); return pra.referenced; } @@ -1052,8 +1052,8 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; /* * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written - * simultaneously, so a concurrent reader (eg page_referenced()'s - * PageAnon()) will not see one without the other. + * simultaneously, so a concurrent reader (eg folio_referenced()'s + * folio_test_anon()) will not see one without the other. */ WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 94729d2d1125..38f124c41bcd 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1386,11 +1386,12 @@ enum page_references { static enum page_references page_check_references(struct page *page, struct scan_control *sc) { + struct folio *folio = page_folio(page); int referenced_ptes, referenced_page; unsigned long vm_flags; - referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, - &vm_flags); + referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, + &vm_flags); referenced_page = TestClearPageReferenced(page); /* @@ -2490,7 +2491,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, * * If the pages are mostly unmapped, the processing is fast and it is * appropriate to hold lru_lock across the whole operation. But if - * the pages are mapped, the processing is slow (page_referenced()), so + * the pages are mapped, the processing is slow (folio_referenced()), so * we should drop lru_lock around each page. It's impossible to balance * this, so instead we remove the pages from the LRU while processing them. 
* It is safe to rely on PG_active against the non-LRU pages in here because @@ -2510,7 +2511,6 @@ static void shrink_active_list(unsigned long nr_to_scan, LIST_HEAD(l_hold); /* The pages which were snipped off */ LIST_HEAD(l_active); LIST_HEAD(l_inactive); - struct page *page; unsigned nr_deactivate, nr_activate; unsigned nr_rotated = 0; int file = is_file_lru(lru); @@ -2532,9 +2532,13 @@ static void shrink_active_list(unsigned long nr_to_scan, spin_unlock_irq(&lruvec->lru_lock); while (!list_empty(&l_hold)) { + struct folio *folio; + struct page *page; + cond_resched(); - page = lru_to_page(&l_hold); - list_del(&page->lru); + folio = lru_to_folio(&l_hold); + list_del(&folio->lru); + page = &folio->page; if (unlikely(!page_evictable(page))) { putback_lru_page(page); @@ -2549,8 +2553,8 @@ static void shrink_active_list(unsigned long nr_to_scan, } } - if (page_referenced(page, 0, sc->target_mem_cgroup, - &vm_flags)) { + if (folio_referenced(folio, 0, sc->target_mem_cgroup, + &vm_flags)) { /* * Identify referenced, file-backed active pages and * give them one more trip around the active list. So -- cgit v1.2.3-71-gd317 From af28a988b313a601c12c410a42e485ca46adcfee Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 21 Jan 2022 10:44:52 -0500 Subject: mm/huge_memory: Convert __split_huge_pmd() to take a folio Convert split_huge_pmd_address() at the same time since it only passes the folio through, and its two callers already have a folio on hand. Removes numerous calls to compound_head() and removes an assumption that a page cannot be larger than a PMD. Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/huge_mm.h | 8 ++++---- mm/huge_memory.c | 46 +++++++++++++++++++++++----------------------- mm/rmap.c | 6 ++++-- 3 files changed, 31 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 71c073d411ac..4368b314d9c8 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -194,7 +194,7 @@ static inline int split_huge_page(struct page *page) void deferred_split_huge_page(struct page *page); void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long address, bool freeze, struct page *page); + unsigned long address, bool freeze, struct folio *folio); #define split_huge_pmd(__vma, __pmd, __address) \ do { \ @@ -207,7 +207,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, - bool freeze, struct page *page); + bool freeze, struct folio *folio); void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, unsigned long address); @@ -406,9 +406,9 @@ static inline void deferred_split_huge_page(struct page *page) {} do { } while (0) static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long address, bool freeze, struct page *page) {} + unsigned long address, bool freeze, struct folio *folio) {} static inline void split_huge_pmd_address(struct vm_area_struct *vma, - unsigned long address, bool freeze, struct page *page) {} + unsigned long address, bool freeze, struct folio *folio) {} #define split_huge_pud(__vma, __pmd, __address) \ do { } while (0) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index beebe4105659..583b735a079b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2113,11 +2113,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, } void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned 
long address, bool freeze, struct page *page) + unsigned long address, bool freeze, struct folio *folio) { spinlock_t *ptl; struct mmu_notifier_range range; - bool do_unlock_page = false; + bool do_unlock_folio = false; pmd_t _pmd; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, @@ -2127,20 +2127,20 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, ptl = pmd_lock(vma->vm_mm, pmd); /* - * If caller asks to setup a migration entries, we need a page to check - * pmd against. Otherwise we can end up replacing wrong page. + * If caller asks to setup a migration entry, we need a folio to check + * pmd against. Otherwise we can end up replacing wrong folio. */ - VM_BUG_ON(freeze && !page); - if (page) { - VM_WARN_ON_ONCE(!PageLocked(page)); - if (page != pmd_page(*pmd)) + VM_BUG_ON(freeze && !folio); + if (folio) { + VM_WARN_ON_ONCE(!folio_test_locked(folio)); + if (folio != page_folio(pmd_page(*pmd))) goto out; } repeat: if (pmd_trans_huge(*pmd)) { - if (!page) { - page = pmd_page(*pmd); + if (!folio) { + folio = page_folio(pmd_page(*pmd)); /* * An anonymous page must be locked, to ensure that a * concurrent reuse_swap_page() sees stable mapcount; @@ -2148,22 +2148,22 @@ repeat: * and page lock must not be taken when zap_pmd_range() * calls __split_huge_pmd() while i_mmap_lock is held. */ - if (PageAnon(page)) { - if (unlikely(!trylock_page(page))) { - get_page(page); + if (folio_test_anon(folio)) { + if (unlikely(!folio_trylock(folio))) { + folio_get(folio); _pmd = *pmd; spin_unlock(ptl); - lock_page(page); + folio_lock(folio); spin_lock(ptl); if (unlikely(!pmd_same(*pmd, _pmd))) { - unlock_page(page); - put_page(page); - page = NULL; + folio_unlock(folio); + folio_put(folio); + folio = NULL; goto repeat; } - put_page(page); + folio_put(folio); } - do_unlock_page = true; + do_unlock_folio = true; } } } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) @@ -2171,8 +2171,8 @@ repeat: __split_huge_pmd_locked(vma, pmd, range.start, freeze); out: spin_unlock(ptl); - if (do_unlock_page) - unlock_page(page); + if (do_unlock_folio) + folio_unlock(folio); /* * No need to double call mmu_notifier->invalidate_range() callback. * They are 3 cases to consider inside __split_huge_pmd_locked(): @@ -2190,7 +2190,7 @@ out: } void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, - bool freeze, struct page *page) + bool freeze, struct folio *folio) { pgd_t *pgd; p4d_t *p4d; @@ -2211,7 +2211,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, pmd = pmd_offset(pud, address); - __split_huge_pmd(vma, pmd, address, freeze, page); + __split_huge_pmd(vma, pmd, address, freeze, folio); } static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) diff --git a/mm/rmap.c b/mm/rmap.c index 36eef54d05d8..8b3d44e56e30 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1410,6 +1410,7 @@ out: static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { + struct folio *folio = page_folio(page); struct mm_struct *mm = vma->vm_mm; DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); pte_t pteval; @@ -1428,7 +1429,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, pvmw.flags = PVMW_SYNC; if (flags & TTU_SPLIT_HUGE_PMD) - split_huge_pmd_address(vma, address, false, page); + split_huge_pmd_address(vma, address, false, folio); /* * For THP, we have to assume the worse case ie pmd for invalidation. 
@@ -1700,6 +1701,7 @@ void try_to_unmap(struct page *page, enum ttu_flags flags) static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { + struct folio *folio = page_folio(page); struct mm_struct *mm = vma->vm_mm; DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); pte_t pteval; @@ -1722,7 +1724,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, * TTU_SPLIT_HUGE_PMD and it wants to freeze. */ if (flags & TTU_SPLIT_HUGE_PMD) - split_huge_pmd_address(vma, address, true, page); + split_huge_pmd_address(vma, address, true, folio); /* * For THP, we have to assume the worse case ie pmd for invalidation. -- cgit v1.2.3-71-gd317 From 869f7ee6f6477341f859c8b0949ae81caf9ca7f3 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 15 Feb 2022 09:28:49 -0500 Subject: mm/rmap: Convert try_to_unmap() to take a folio Change all three callers and the worker function try_to_unmap_one(). Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/rmap.h | 4 +-- mm/huge_memory.c | 3 +- mm/khugepaged.c | 3 +- mm/memory-failure.c | 7 +++-- mm/memory_hotplug.c | 13 ++++---- mm/rmap.c | 83 +++++++++++++++++++++++++++------------------------- mm/vmscan.c | 2 +- 7 files changed, 62 insertions(+), 53 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 69a1664216de..a0c5c38c733f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -194,7 +194,7 @@ int folio_referenced(struct folio *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); void try_to_migrate(struct page *page, enum ttu_flags flags); -void try_to_unmap(struct page *, enum ttu_flags flags); +void try_to_unmap(struct folio *, enum ttu_flags flags); int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct page **pages, @@ -309,7 +309,7 @@ static inline int folio_referenced(struct folio *folio, int is_locked, return 0; } -static inline void try_to_unmap(struct page *page, enum ttu_flags flags) +static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags) { } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 583b735a079b..de684427f79c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2251,6 +2251,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, static void unmap_page(struct page *page) { + struct folio *folio = page_folio(page); enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC; @@ -2264,7 +2265,7 @@ static void unmap_page(struct page *page) if (PageAnon(page)) try_to_migrate(page, ttu_flags); else - try_to_unmap(page, ttu_flags | TTU_IGNORE_MLOCK); + try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); VM_WARN_ON_ONCE_PAGE(page_mapped(page), page); } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index fa05e6d39783..1cdf7c38b9e5 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1834,7 +1834,8 @@ static void collapse_file(struct mm_struct *mm, } if (page_mapped(page)) - try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH); + try_to_unmap(page_folio(page), + TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH); xas_lock_irq(&xas); xas_set(&xas, index); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 0b72a936b8dd..258913d5e036 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1347,6 +1347,7 @@ static int get_hwpoison_page(struct page *p, unsigned long flags) static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, int flags, struct page *hpage) { + struct 
folio *folio = page_folio(hpage); enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC; struct address_space *mapping; LIST_HEAD(tokill); @@ -1412,7 +1413,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); if (!PageHuge(hpage)) { - try_to_unmap(hpage, ttu); + try_to_unmap(folio, ttu); } else { if (!PageAnon(hpage)) { /* @@ -1424,12 +1425,12 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, */ mapping = hugetlb_page_mapping_lock_write(hpage); if (mapping) { - try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED); + try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); i_mmap_unlock_write(mapping); } else pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn); } else { - try_to_unmap(hpage, ttu); + try_to_unmap(folio, ttu); } } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 2a9627dc784c..914057da53c7 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1690,10 +1690,13 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) DEFAULT_RATELIMIT_BURST); for (pfn = start_pfn; pfn < end_pfn; pfn++) { + struct folio *folio; + if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); - head = compound_head(page); + folio = page_folio(page); + head = &folio->page; if (PageHuge(page)) { pfn = page_to_pfn(head) + compound_nr(head) - 1; @@ -1710,10 +1713,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) * the unmap as the catch all safety net). */ if (PageHWPoison(page)) { - if (WARN_ON(PageLRU(page))) - isolate_lru_page(page); - if (page_mapped(page)) - try_to_unmap(page, TTU_IGNORE_MLOCK); + if (WARN_ON(folio_test_lru(folio))) + folio_isolate_lru(folio); + if (folio_mapped(folio)) + try_to_unmap(folio, TTU_IGNORE_MLOCK); continue; } diff --git a/mm/rmap.c b/mm/rmap.c index 8b3d44e56e30..cf6e3de9d2f7 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1412,7 +1412,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, { struct folio *folio = page_folio(page); struct mm_struct *mm = vma->vm_mm; - DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); + DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; struct page *subpage; bool ret = true; @@ -1436,13 +1436,13 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * For hugetlb, it could be much worse if we need to do pud * invalidation in the case of pmd sharing. * - * Note that the page can not be free in this function as call of - * try_to_unmap() must hold a reference on the page. + * Note that the folio can not be freed in this function as call of + * try_to_unmap() must hold a reference on the folio. */ range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, address, range.end); - if (PageHuge(page)) { + if (folio_test_hugetlb(folio)) { /* * If sharing is possible, start and end will be adjusted * accordingly. @@ -1454,24 +1454,25 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, while (page_vma_mapped_walk(&pvmw)) { /* Unexpected PMD-mapped THP? */ - VM_BUG_ON_PAGE(!pvmw.pte, page); + VM_BUG_ON_FOLIO(!pvmw.pte, folio); /* - * If the page is in an mlock()d vma, we must not swap it out. + * If the folio is in an mlock()d vma, we must not swap it out. 
*/ if (!(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Restore the mlock which got missed */ - mlock_vma_page(page, vma, false); + mlock_vma_folio(folio, vma, false); page_vma_mapped_walk_done(&pvmw); ret = false; break; } - subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); + subpage = folio_page(folio, + pte_pfn(*pvmw.pte) - folio_pfn(folio)); address = pvmw.address; - if (PageHuge(page) && !PageAnon(page)) { + if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { /* * To call huge_pmd_unshare, i_mmap_rwsem must be * held in write mode. Caller needs to explicitly @@ -1510,7 +1511,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, if (should_defer_flush(mm, flags)) { /* * We clear the PTE but do not flush so potentially - * a remote CPU could still be writing to the page. + * a remote CPU could still be writing to the folio. * If the entry was previously clean then the * architecture must guarantee that a clear->dirty * transition on a cached TLB entry is written through @@ -1523,22 +1524,22 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, pteval = ptep_clear_flush(vma, address, pvmw.pte); } - /* Move the dirty bit to the page. Now the pte is gone. */ + /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) - set_page_dirty(page); + folio_mark_dirty(folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); - if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { + if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); - if (PageHuge(page)) { - hugetlb_count_sub(compound_nr(page), mm); + if (folio_test_hugetlb(folio)) { + hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_swap_pte_at(mm, address, pvmw.pte, pteval, vma_mmu_pagesize(vma)); } else { - dec_mm_counter(mm, mm_counter(page)); + dec_mm_counter(mm, mm_counter(&folio->page)); set_pte_at(mm, address, pvmw.pte, pteval); } @@ -1553,18 +1554,19 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * migration) will not expect userfaults on already * copied pages. */ - dec_mm_counter(mm, mm_counter(page)); + dec_mm_counter(mm, mm_counter(&folio->page)); /* We have to invalidate as we cleared the pte */ mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE); - } else if (PageAnon(page)) { + } else if (folio_test_anon(folio)) { swp_entry_t entry = { .val = page_private(subpage) }; pte_t swp_pte; /* * Store the swap location in the pte. * See handle_pte_fault() ... */ - if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { + if (unlikely(folio_test_swapbacked(folio) != + folio_test_swapcache(folio))) { WARN_ON_ONCE(1); ret = false; /* We have to invalidate as we cleared the pte */ @@ -1575,8 +1577,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, } /* MADV_FREE page check */ - if (!PageSwapBacked(page)) { - if (!PageDirty(page)) { + if (!folio_test_swapbacked(folio)) { + if (!folio_test_dirty(folio)) { /* Invalidate as we cleared the pte */ mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE); @@ -1585,11 +1587,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, } /* - * If the page was redirtied, it cannot be + * If the folio was redirtied, it cannot be * discarded. Remap the page to page table. 
*/ set_pte_at(mm, address, pvmw.pte, pteval); - SetPageSwapBacked(page); + folio_set_swapbacked(folio); ret = false; page_vma_mapped_walk_done(&pvmw); break; @@ -1626,16 +1628,17 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, address + PAGE_SIZE); } else { /* - * This is a locked file-backed page, thus it cannot - * be removed from the page cache and replaced by a new - * page before mmu_notifier_invalidate_range_end, so no - * concurrent thread might update its page table to - * point at new page while a device still is using this - * page. + * This is a locked file-backed folio, + * so it cannot be removed from the page + * cache and replaced by a new folio before + * mmu_notifier_invalidate_range_end, so no + * concurrent thread might update its page table + * to point at a new folio while a device is + * still using this folio. * * See Documentation/vm/mmu_notifier.rst */ - dec_mm_counter(mm, mm_counter_file(page)); + dec_mm_counter(mm, mm_counter_file(&folio->page)); } discard: /* @@ -1645,10 +1648,10 @@ discard: * * See Documentation/vm/mmu_notifier.rst */ - page_remove_rmap(subpage, vma, PageHuge(page)); + page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); if (vma->vm_flags & VM_LOCKED) mlock_page_drain(smp_processor_id()); - put_page(page); + folio_put(folio); } mmu_notifier_invalidate_range_end(&range); @@ -1667,17 +1670,17 @@ static int page_not_mapped(struct page *page) } /** - * try_to_unmap - try to remove all page table mappings to a page - * @page: the page to get unmapped + * try_to_unmap - Try to remove all page table mappings to a folio. + * @folio: The folio to unmap. * @flags: action and flags * * Tries to remove all the page table entries which are mapping this - * page, used in the pageout path. Caller must hold the page lock. + * folio. It is the caller's responsibility to check if the folio is + * still mapped if needed (use TTU_SYNC to prevent accounting races). * - * It is the caller's responsibility to check if the page is still - * mapped when needed (use TTU_SYNC to prevent accounting races). + * Context: Caller must hold the folio lock. */ -void try_to_unmap(struct page *page, enum ttu_flags flags) +void try_to_unmap(struct folio *folio, enum ttu_flags flags) { struct rmap_walk_control rwc = { .rmap_one = try_to_unmap_one, @@ -1687,9 +1690,9 @@ void try_to_unmap(struct page *page, enum ttu_flags flags) }; if (flags & TTU_RMAP_LOCKED) - rmap_walk_locked(page, &rwc); + rmap_walk_locked(&folio->page, &rwc); else - rmap_walk(page, &rwc); + rmap_walk(&folio->page, &rwc); } /* diff --git a/mm/vmscan.c b/mm/vmscan.c index 38f124c41bcd..a57eb747f08d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1768,7 +1768,7 @@ retry: if (unlikely(PageTransHuge(page))) flags |= TTU_SPLIT_HUGE_PMD; - try_to_unmap(page, flags); + try_to_unmap(folio, flags); if (page_mapped(page)) { stat->nr_unmap_fail += nr_pages; if (!was_swapbacked && PageSwapBacked(page)) -- cgit v1.2.3-71-gd317 From 4b8554c527f3cfa183f6c06d231a9387873205a0 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 28 Jan 2022 14:29:43 -0500 Subject: mm/rmap: Convert try_to_migrate() to folios Convert the callers to pass a folio and the try_to_migrate_one() worker to use a folio throughout. Fixes an assumption that a folio must be <= PMD size. 
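For callers the conversion is mechanical: resolve the folio once with page_folio() and pass it through. A minimal sketch of the pattern (the helper name demo_unmap_for_migration() is hypothetical and not part of this patch):

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Hypothetical caller: replace the page table entries mapping a page
 * with migration entries, using the folio-based API.
 */
static void demo_unmap_for_migration(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* As before, the caller must hold the folio lock. */
	if (folio_mapped(folio))
		try_to_migrate(folio, 0);
}

This mirrors the migrate_device.c hunk below and drops the per-call compound_head() lookup that the old page-based interface forced on every caller.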
Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/rmap.h | 2 +- mm/huge_memory.c | 4 ++-- mm/migrate.c | 6 ++++-- mm/migrate_device.c | 6 ++++-- mm/rmap.c | 59 +++++++++++++++++++++++++++------------------------- 5 files changed, 42 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index a0c5c38c733f..e6e935c81414 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -193,7 +193,7 @@ static inline void page_dup_rmap(struct page *page, bool compound) int folio_referenced(struct folio *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); -void try_to_migrate(struct page *page, enum ttu_flags flags); +void try_to_migrate(struct folio *folio, enum ttu_flags flags); void try_to_unmap(struct folio *, enum ttu_flags flags); int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index de684427f79c..7df1934d6528 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2262,8 +2262,8 @@ static void unmap_page(struct page *page) * pages can simply be left unmapped, then faulted back on demand. * If that is ever changed (perhaps for mlock), update remap_page(). */ - if (PageAnon(page)) - try_to_migrate(page, ttu_flags); + if (folio_test_anon(folio)) + try_to_migrate(folio, ttu_flags); else try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); diff --git a/mm/migrate.c b/mm/migrate.c index 358bc311caaa..6ed85a5d1be5 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -912,6 +912,7 @@ out: static int __unmap_and_move(struct page *page, struct page *newpage, int force, enum migrate_mode mode) { + struct folio *folio = page_folio(page); int rc = -EAGAIN; bool page_was_mapped = false; struct anon_vma *anon_vma = NULL; @@ -1015,7 +1016,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, /* Establish migration ptes */ VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, page); - try_to_migrate(page, 0); + try_to_migrate(folio, 0); page_was_mapped = true; } @@ -1165,6 +1166,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, enum migrate_mode mode, int reason, struct list_head *ret) { + struct folio *src = page_folio(hpage); int rc = -EAGAIN; int page_was_mapped = 0; struct page *new_hpage; @@ -1241,7 +1243,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ttu |= TTU_RMAP_LOCKED; } - try_to_migrate(hpage, ttu); + try_to_migrate(src, ttu); page_was_mapped = 1; if (mapping_locked) diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 0326b901d2fd..b2c611d4bdb2 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -333,6 +333,7 @@ static void migrate_vma_unmap(struct migrate_vma *migrate) for (i = 0; i < npages; i++) { struct page *page = migrate_pfn_to_page(migrate->src[i]); + struct folio *folio; if (!page) continue; @@ -356,8 +357,9 @@ static void migrate_vma_unmap(struct migrate_vma *migrate) put_page(page); } - if (page_mapped(page)) - try_to_migrate(page, 0); + folio = page_folio(page); + if (folio_mapped(folio)) + try_to_migrate(folio, 0); if (page_mapped(page) || !migrate_vma_check_page(page)) { if (!is_zone_device_page(page)) { diff --git a/mm/rmap.c b/mm/rmap.c index cf6e3de9d2f7..8497da29193c 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1706,7 +1706,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, { struct folio *folio = page_folio(page); struct mm_struct *mm = vma->vm_mm; - DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0); + 
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; struct page *subpage; bool ret = true; @@ -1740,7 +1740,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, address, range.end); - if (PageHuge(page)) { + if (folio_test_hugetlb(folio)) { /* * If sharing is possible, start and end will be adjusted * accordingly. @@ -1754,21 +1754,24 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION /* PMD-mapped THP migration entry */ if (!pvmw.pte) { - VM_BUG_ON_PAGE(PageHuge(page) || - !PageTransCompound(page), page); + subpage = folio_page(folio, + pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); + VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || + !folio_test_pmd_mappable(folio), folio); - set_pmd_migration_entry(&pvmw, page); + set_pmd_migration_entry(&pvmw, subpage); continue; } #endif /* Unexpected PMD-mapped THP? */ - VM_BUG_ON_PAGE(!pvmw.pte, page); + VM_BUG_ON_FOLIO(!pvmw.pte, folio); - subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); + subpage = folio_page(folio, + pte_pfn(*pvmw.pte) - folio_pfn(folio)); address = pvmw.address; - if (PageHuge(page) && !PageAnon(page)) { + if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { /* * To call huge_pmd_unshare, i_mmap_rwsem must be * held in write mode. Caller needs to explicitly @@ -1806,15 +1809,15 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); pteval = ptep_clear_flush(vma, address, pvmw.pte); - /* Move the dirty bit to the page. Now the pte is gone. */ + /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) - set_page_dirty(page); + folio_mark_dirty(folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); - if (is_zone_device_page(page)) { - unsigned long pfn = page_to_pfn(page); + if (folio_is_zone_device(folio)) { + unsigned long pfn = folio_pfn(folio); swp_entry_t entry; pte_t swp_pte; @@ -1850,16 +1853,16 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, * changed when hugepage migrations to device private * memory are supported. */ - subpage = page; - } else if (PageHWPoison(page)) { + subpage = &folio->page; + } else if (PageHWPoison(subpage)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); - if (PageHuge(page)) { - hugetlb_count_sub(compound_nr(page), mm); + if (folio_test_hugetlb(folio)) { + hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_swap_pte_at(mm, address, pvmw.pte, pteval, vma_mmu_pagesize(vma)); } else { - dec_mm_counter(mm, mm_counter(page)); + dec_mm_counter(mm, mm_counter(&folio->page)); set_pte_at(mm, address, pvmw.pte, pteval); } @@ -1874,7 +1877,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, * migration) will not expect userfaults on already * copied pages. 
*/ - dec_mm_counter(mm, mm_counter(page)); + dec_mm_counter(mm, mm_counter(&folio->page)); /* We have to invalidate as we cleared the pte */ mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE); @@ -1920,10 +1923,10 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, * * See Documentation/vm/mmu_notifier.rst */ - page_remove_rmap(subpage, vma, PageHuge(page)); + page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); if (vma->vm_flags & VM_LOCKED) mlock_page_drain(smp_processor_id()); - put_page(page); + folio_put(folio); } mmu_notifier_invalidate_range_end(&range); @@ -1933,13 +1936,13 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, /** * try_to_migrate - try to replace all page table mappings with swap entries - * @page: the page to replace page table entries for + * @folio: the folio to replace page table entries for * @flags: action and flags * - * Tries to remove all the page table entries which are mapping this page and - * replace them with special swap entries. Caller must hold the page lock. + * Tries to remove all the page table entries which are mapping this folio and + * replace them with special swap entries. Caller must hold the folio lock. */ -void try_to_migrate(struct page *page, enum ttu_flags flags) +void try_to_migrate(struct folio *folio, enum ttu_flags flags) { struct rmap_walk_control rwc = { .rmap_one = try_to_migrate_one, @@ -1956,7 +1959,7 @@ void try_to_migrate(struct page *page, enum ttu_flags flags) TTU_SYNC))) return; - if (is_zone_device_page(page) && !is_device_private_page(page)) + if (folio_is_zone_device(folio) && !folio_is_device_private(folio)) return; /* @@ -1967,13 +1970,13 @@ void try_to_migrate(struct page *page, enum ttu_flags flags) * locking requirements of exec(), migration skips * temporary VMAs until after exec() completes. */ - if (!PageKsm(page) && PageAnon(page)) + if (!folio_test_ksm(folio) && folio_test_anon(folio)) rwc.invalid_vma = invalid_migration_vma; if (flags & TTU_RMAP_LOCKED) - rmap_walk_locked(page, &rwc); + rmap_walk_locked(&folio->page, &rwc); else - rmap_walk(page, &rwc); + rmap_walk(&folio->page, &rwc); } #ifdef CONFIG_DEVICE_PRIVATE -- cgit v1.2.3-71-gd317 From 4eecb8b9163df82c87c91764a02fff228ef25f6d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 28 Jan 2022 23:32:59 -0500 Subject: mm/migrate: Convert remove_migration_ptes() to folios Convert the implementation and all callers. Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/rmap.h | 2 +- mm/huge_memory.c | 24 ++++++++++++----------- mm/migrate.c | 55 ++++++++++++++++++++++++++++------------------------ mm/migrate_device.c | 15 +++++++++----- 4 files changed, 54 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index e6e935c81414..21af80d5b711 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -261,7 +261,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); */ int folio_mkclean(struct folio *); -void remove_migration_ptes(struct page *old, struct page *new, bool locked); +void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked); /* * Called by memory-failure.c to kill processes. 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7df1934d6528..d55b25f1ceba 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2270,18 +2270,19 @@ static void unmap_page(struct page *page) VM_WARN_ON_ONCE_PAGE(page_mapped(page), page); } -static void remap_page(struct page *page, unsigned int nr) +static void remap_page(struct folio *folio, unsigned long nr) { - int i; + int i = 0; /* If unmap_page() uses try_to_migrate() on file, remove this check */ - if (!PageAnon(page)) + if (!folio_test_anon(folio)) return; - if (PageTransHuge(page)) { - remove_migration_ptes(page, page, true); - } else { - for (i = 0; i < nr; i++) - remove_migration_ptes(page + i, page + i, true); + for (;;) { + remove_migration_ptes(folio, folio, true); + i += folio_nr_pages(folio); + if (i >= nr) + break; + folio = folio_next(folio); } } @@ -2441,7 +2442,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, } local_irq_enable(); - remap_page(head, nr); + remap_page(folio, nr); if (PageSwapCache(head)) { swp_entry_t entry = { .val = page_private(head) }; @@ -2550,7 +2551,8 @@ bool can_split_huge_page(struct page *page, int *pextra_pins) */ int split_huge_page_to_list(struct page *page, struct list_head *list) { - struct page *head = compound_head(page); + struct folio *folio = page_folio(page); + struct page *head = &folio->page; struct deferred_split *ds_queue = get_deferred_split_queue(head); XA_STATE(xas, &head->mapping->i_pages, head->index); struct anon_vma *anon_vma = NULL; @@ -2667,7 +2669,7 @@ fail: if (mapping) xas_unlock(&xas); local_irq_enable(); - remap_page(head, thp_nr_pages(head)); + remap_page(folio, folio_nr_pages(folio)); ret = -EBUSY; } diff --git a/mm/migrate.c b/mm/migrate.c index 6ed85a5d1be5..eba3cd5376e3 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -174,30 +174,32 @@ void putback_movable_pages(struct list_head *l) static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *old) { - DEFINE_PAGE_VMA_WALK(pvmw, (struct page *)old, vma, addr, - PVMW_SYNC | PVMW_MIGRATION); - struct page *new; - pte_t pte; - swp_entry_t entry; + struct folio *folio = page_folio(page); + DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); VM_BUG_ON_PAGE(PageTail(page), page); while (page_vma_mapped_walk(&pvmw)) { - if (PageKsm(page)) - new = page; - else - new = page - pvmw.pgoff + - linear_page_index(vma, pvmw.address); + pte_t pte; + swp_entry_t entry; + struct page *new; + unsigned long idx = 0; + + /* pgoff is invalid for ksm pages, but they are never large */ + if (folio_test_large(folio) && !folio_test_hugetlb(folio)) + idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff; + new = folio_page(folio, idx); #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION /* PMD-mapped THP migration entry */ if (!pvmw.pte) { - VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); + VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || + !folio_test_pmd_mappable(folio), folio); remove_migration_pmd(&pvmw, new); continue; } #endif - get_page(new); + folio_get(folio); pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); if (pte_swp_soft_dirty(*pvmw.pte)) pte = pte_mksoft_dirty(pte); @@ -226,12 +228,12 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, } #ifdef CONFIG_HUGETLB_PAGE - if (PageHuge(new)) { + if (folio_test_hugetlb(folio)) { unsigned int shift = huge_page_shift(hstate_vma(vma)); pte = pte_mkhuge(pte); pte = arch_make_huge_pte(pte, shift, vma->vm_flags); - if (PageAnon(new)) + if 
(folio_test_anon(folio)) hugepage_add_anon_rmap(new, vma, pvmw.address); else page_dup_rmap(new, true); @@ -239,7 +241,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, } else #endif { - if (PageAnon(new)) + if (folio_test_anon(folio)) page_add_anon_rmap(new, vma, pvmw.address, false); else page_add_file_rmap(new, vma, false); @@ -259,17 +261,17 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, * Get rid of all migration entries and replace them by * references to the indicated page. */ -void remove_migration_ptes(struct page *old, struct page *new, bool locked) +void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) { struct rmap_walk_control rwc = { .rmap_one = remove_migration_pte, - .arg = old, + .arg = src, }; if (locked) - rmap_walk_locked(new, &rwc); + rmap_walk_locked(&dst->page, &rwc); else - rmap_walk(new, &rwc); + rmap_walk(&dst->page, &rwc); } /* @@ -756,6 +758,7 @@ int buffer_migrate_page_norefs(struct address_space *mapping, */ static int writeout(struct address_space *mapping, struct page *page) { + struct folio *folio = page_folio(page); struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = 1, @@ -781,7 +784,7 @@ static int writeout(struct address_space *mapping, struct page *page) * At this point we know that the migration attempt cannot * be successful. */ - remove_migration_ptes(page, page, false); + remove_migration_ptes(folio, folio, false); rc = mapping->a_ops->writepage(page, &wbc); @@ -913,6 +916,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, int force, enum migrate_mode mode) { struct folio *folio = page_folio(page); + struct folio *dst = page_folio(newpage); int rc = -EAGAIN; bool page_was_mapped = false; struct anon_vma *anon_vma = NULL; @@ -1039,8 +1043,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage, } if (page_was_mapped) - remove_migration_ptes(page, - rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); + remove_migration_ptes(folio, + rc == MIGRATEPAGE_SUCCESS ? dst : folio, false); out_unlock_both: unlock_page(newpage); @@ -1166,7 +1170,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, enum migrate_mode mode, int reason, struct list_head *ret) { - struct folio *src = page_folio(hpage); + struct folio *dst, *src = page_folio(hpage); int rc = -EAGAIN; int page_was_mapped = 0; struct page *new_hpage; @@ -1194,6 +1198,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, new_hpage = get_new_page(hpage, private); if (!new_hpage) return -ENOMEM; + dst = page_folio(new_hpage); if (!trylock_page(hpage)) { if (!force) @@ -1254,8 +1259,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, rc = move_to_new_page(new_hpage, hpage, mode); if (page_was_mapped) - remove_migration_ptes(hpage, - rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false); + remove_migration_ptes(src, + rc == MIGRATEPAGE_SUCCESS ? 
dst : src, false); unlock_put_anon: unlock_page(new_hpage); diff --git a/mm/migrate_device.c b/mm/migrate_device.c index b2c611d4bdb2..70c7dc05bbfc 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -376,15 +376,17 @@ static void migrate_vma_unmap(struct migrate_vma *migrate) for (i = 0; i < npages && restore; i++) { struct page *page = migrate_pfn_to_page(migrate->src[i]); + struct folio *folio; if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) continue; - remove_migration_ptes(page, page, false); + folio = page_folio(page); + remove_migration_ptes(folio, folio, false); migrate->src[i] = 0; - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); restore--; } } @@ -729,6 +731,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate) unsigned long i; for (i = 0; i < npages; i++) { + struct folio *dst, *src; struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); struct page *page = migrate_pfn_to_page(migrate->src[i]); @@ -748,8 +751,10 @@ void migrate_vma_finalize(struct migrate_vma *migrate) newpage = page; } - remove_migration_ptes(page, newpage, false); - unlock_page(page); + src = page_folio(page); + dst = page_folio(newpage); + remove_migration_ptes(src, dst, false); + folio_unlock(src); if (is_zone_device_page(page)) put_page(page); -- cgit v1.2.3-71-gd317 From 9595d76942b8714627d670a7e7ae543812c731ae Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 1 Feb 2022 23:33:08 -0500 Subject: mm/rmap: Turn page_lock_anon_vma_read() into folio_lock_anon_vma_read() Add back page_lock_anon_vma_read() as a wrapper. This saves a few calls to compound_head(). If any callers were passing a tail page before, this would have failed to lock the anon VMA as page->mapping is not valid for tail pages. Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/rmap.h | 1 + mm/folio-compat.c | 7 +++++++ mm/memory-failure.c | 3 ++- mm/rmap.c | 12 ++++++------ 4 files changed, 16 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 21af80d5b711..be020d38b0a5 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -267,6 +267,7 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked); * Called by memory-failure.c to kill processes. 
*/ struct anon_vma *page_lock_anon_vma_read(struct page *page); +struct anon_vma *folio_lock_anon_vma_read(struct folio *folio); void page_unlock_anon_vma_read(struct anon_vma *anon_vma); int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); diff --git a/mm/folio-compat.c b/mm/folio-compat.c index 46fa179e32fb..968ad97bbffa 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -164,3 +164,10 @@ void putback_lru_page(struct page *page) { folio_putback_lru(page_folio(page)); } + +#ifdef CONFIG_MMU +struct anon_vma *page_lock_anon_vma_read(struct page *page) +{ + return folio_lock_anon_vma_read(page_folio(page)); +} +#endif diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 258913d5e036..aa8236848949 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -487,12 +487,13 @@ static struct task_struct *task_early_kill(struct task_struct *tsk, static void collect_procs_anon(struct page *page, struct list_head *to_kill, int force_early) { + struct folio *folio = page_folio(page); struct vm_area_struct *vma; struct task_struct *tsk; struct anon_vma *av; pgoff_t pgoff; - av = page_lock_anon_vma_read(page); + av = folio_lock_anon_vma_read(folio); if (av == NULL) /* Not actually mapped anymore */ return; diff --git a/mm/rmap.c b/mm/rmap.c index c74de8af7eec..64655d345234 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -526,28 +526,28 @@ out: * atomic op -- the trylock. If we fail the trylock, we fall back to getting a * reference like with page_get_anon_vma() and then block on the mutex. */ -struct anon_vma *page_lock_anon_vma_read(struct page *page) +struct anon_vma *folio_lock_anon_vma_read(struct folio *folio) { struct anon_vma *anon_vma = NULL; struct anon_vma *root_anon_vma; unsigned long anon_mapping; rcu_read_lock(); - anon_mapping = (unsigned long)READ_ONCE(page->mapping); + anon_mapping = (unsigned long)READ_ONCE(folio->mapping); if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; - if (!page_mapped(page)) + if (!folio_mapped(folio)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); root_anon_vma = READ_ONCE(anon_vma->root); if (down_read_trylock(&root_anon_vma->rwsem)) { /* - * If the page is still mapped, then this anon_vma is still + * If the folio is still mapped, then this anon_vma is still * its anon_vma, and holding the mutex ensures that it will * not go away, see anon_vma_free(). */ - if (!page_mapped(page)) { + if (!folio_mapped(folio)) { up_read(&root_anon_vma->rwsem); anon_vma = NULL; } @@ -560,7 +560,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page) goto out; } - if (!page_mapped(page)) { + if (!folio_mapped(folio)) { rcu_read_unlock(); put_anon_vma(anon_vma); return NULL; -- cgit v1.2.3-71-gd317 From e05b34539d008ab819388f699b25eae962ba24ac Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 29 Jan 2022 11:52:52 -0500 Subject: mm: Turn page_anon_vma() into folio_anon_vma() Move the prototype from mm.h to mm/internal.h and convert all callers to pass a folio. 
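Since the prototype now lives in mm/internal.h, only mm-internal code can reach it; everything else keeps going through the higher-level helpers. A minimal sketch of the new calling convention (demo_get_anon_vma() is a hypothetical helper, not part of this patch):

#include <linux/mm.h>
#include "internal.h"	/* folio_anon_vma() is declared here now */

/* Hypothetical mm-internal lookup of the anon_vma backing a page. */
static struct anon_vma *demo_get_anon_vma(struct page *page)
{
	struct folio *folio = page_folio(page);

	/*
	 * folio_anon_vma() returns NULL for folios that are not plain
	 * anonymous (file-backed, or with extra PAGE_MAPPING_FLAGS set).
	 */
	return folio_anon_vma(folio);
}

This matches the ksm.c conversion below: a caller with a page in hand resolves the folio first, instead of folio_anon_vma() doing it internally.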
Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/mm.h | 1 - mm/internal.h | 1 + mm/ksm.c | 3 ++- mm/rmap.c | 19 ++++++++++++------- mm/util.c | 3 +-- 5 files changed, 16 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 0d380dc26847..a879c583f665 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1730,7 +1730,6 @@ static inline void *folio_address(const struct folio *folio) } extern void *page_rmapping(struct page *page); -extern struct anon_vma *page_anon_vma(struct page *page); extern pgoff_t __page_file_index(struct page *page); /* diff --git a/mm/internal.h b/mm/internal.h index 6039acc780c0..2b2c2c4eb63a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -392,6 +392,7 @@ static inline bool is_data_mapping(vm_flags_t flags) void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev); void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma); +struct anon_vma *folio_anon_vma(struct folio *folio); #ifdef CONFIG_MMU void unmap_mapping_folio(struct folio *folio); diff --git a/mm/ksm.c b/mm/ksm.c index ea82fef93a31..b25d545e0cd1 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2554,7 +2554,8 @@ void __ksm_exit(struct mm_struct *mm) struct page *ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) { - struct anon_vma *anon_vma = page_anon_vma(page); + struct folio *folio = page_folio(page); + struct anon_vma *anon_vma = folio_anon_vma(folio); struct page *new_page; if (PageKsm(page)) { diff --git a/mm/rmap.c b/mm/rmap.c index 64655d345234..09301aecf2fc 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -737,8 +737,9 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) */ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) { - if (PageAnon(page)) { - struct anon_vma *page__anon_vma = page_anon_vma(page); + struct folio *folio = page_folio(page); + if (folio_test_anon(folio)) { + struct anon_vma *page__anon_vma = folio_anon_vma(folio); /* * Note: swapoff's unuse_vma() is more efficient with this * check, and needs it to match anon_vma when KSM is active. @@ -748,7 +749,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) return -EFAULT; } else if (!vma->vm_file) { return -EFAULT; - } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { + } else if (vma->vm_file->f_mapping != folio->mapping) { return -EFAULT; } @@ -1103,6 +1104,7 @@ static void __page_set_anon_rmap(struct page *page, static void __page_check_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { + struct folio *folio = page_folio(page); /* * The page's anon-rmap details (mapping and index) are guaranteed to * be set up correctly at this point. @@ -1114,7 +1116,8 @@ static void __page_check_anon_rmap(struct page *page, * are initially only visible via the pagetables, and the pte is locked * over the call to page_add_new_anon_rmap. 
*/ - VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); + VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, + folio); VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), page); } @@ -2177,6 +2180,7 @@ void __put_anon_vma(struct anon_vma *anon_vma) static struct anon_vma *rmap_walk_anon_lock(struct page *page, struct rmap_walk_control *rwc) { + struct folio *folio = page_folio(page); struct anon_vma *anon_vma; if (rwc->anon_lock) @@ -2188,7 +2192,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, * are holding mmap_lock. Users without mmap_lock are required to * take a reference count to prevent the anon_vma disappearing */ - anon_vma = page_anon_vma(page); + anon_vma = folio_anon_vma(folio); if (!anon_vma) return NULL; @@ -2208,14 +2212,15 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, bool locked) { + struct folio *folio = page_folio(page); struct anon_vma *anon_vma; pgoff_t pgoff_start, pgoff_end; struct anon_vma_chain *avc; if (locked) { - anon_vma = page_anon_vma(page); + anon_vma = folio_anon_vma(folio); /* anon_vma disappear under us? */ - VM_BUG_ON_PAGE(!anon_vma, page); + VM_BUG_ON_FOLIO(!anon_vma, folio); } else { anon_vma = rmap_walk_anon_lock(page, rwc); } diff --git a/mm/util.c b/mm/util.c index b614f423aaa4..13fc88ac8e70 100644 --- a/mm/util.c +++ b/mm/util.c @@ -679,9 +679,8 @@ bool folio_mapped(struct folio *folio) } EXPORT_SYMBOL(folio_mapped); -struct anon_vma *page_anon_vma(struct page *page) +struct anon_vma *folio_anon_vma(struct folio *folio) { - struct folio *folio = page_folio(page); unsigned long mapping = (unsigned long)folio->mapping; if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) -- cgit v1.2.3-71-gd317 From 2f031c6f042cb8a9b221a8b6b80e69de5170f830 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 29 Jan 2022 16:06:53 -0500 Subject: mm/rmap: Convert rmap_walk() to take a folio This ripples all the way through to every calling and called function from rmap. 
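With the control structure operating on folios end to end, a walker now looks like this minimal sketch (demo_walk_one() and demo_count_mappings() are hypothetical, modelled on the folio_referenced() pattern in this patch):

#include <linux/rmap.h>

/* Hypothetical per-VMA callback: count the VMAs that map the folio. */
static bool demo_walk_one(struct folio *folio, struct vm_area_struct *vma,
			  unsigned long address, void *arg)
{
	int *nr_vmas = arg;

	(*nr_vmas)++;
	return true;	/* true means: keep walking */
}

/* Hypothetical driver: walk all mappings of a locked folio. */
static int demo_count_mappings(struct folio *folio)
{
	int nr_vmas = 0;
	struct rmap_walk_control rwc = {
		.rmap_one  = demo_walk_one,
		.arg       = &nr_vmas,
		.anon_lock = folio_lock_anon_vma_read,
	};

	rmap_walk(folio, &rwc);
	return nr_vmas;
}

No page_folio() or compound_head() call remains on this path: the caller establishes the folio once and every callback receives it directly.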
Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/ksm.h | 4 +- include/linux/rmap.h | 11 +++-- mm/damon/paddr.c | 15 ++++--- mm/folio-compat.c | 7 ---- mm/huge_memory.c | 2 +- mm/ksm.c | 12 +++--- mm/migrate.c | 10 ++--- mm/page_idle.c | 7 ++-- mm/rmap.c | 111 ++++++++++++++++++++++++--------------------------- 9 files changed, 80 insertions(+), 99 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index a38a5bca1ba5..0b4f17418f64 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm) struct page *ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address); -void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); +void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc); void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); #else /* !CONFIG_KSM */ @@ -78,7 +78,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page, return page; } -static inline void rmap_walk_ksm(struct page *page, +static inline void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) { } diff --git a/include/linux/rmap.h b/include/linux/rmap.h index be020d38b0a5..a74d811c5b3f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -266,7 +266,6 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked); /* * Called by memory-failure.c to kill processes. */ -struct anon_vma *page_lock_anon_vma_read(struct page *page); struct anon_vma *folio_lock_anon_vma_read(struct folio *folio); void page_unlock_anon_vma_read(struct anon_vma *anon_vma); int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); @@ -286,15 +285,15 @@ struct rmap_walk_control { * Return false if page table scanning in rmap_walk should be stopped. * Otherwise, return true. 
*/ - bool (*rmap_one)(struct page *page, struct vm_area_struct *vma, + bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg); - int (*done)(struct page *page); - struct anon_vma *(*anon_lock)(struct page *page); + int (*done)(struct folio *folio); + struct anon_vma *(*anon_lock)(struct folio *folio); bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); }; -void rmap_walk(struct page *page, struct rmap_walk_control *rwc); -void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); +void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc); +void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc); #else /* !CONFIG_MMU */ diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index eee8a3395d71..ae24549921e2 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -16,10 +16,10 @@ #include "../internal.h" #include "prmtv-common.h" -static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma, +static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg) { - DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0); + DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; @@ -37,7 +37,7 @@ static void damon_pa_mkold(unsigned long paddr) struct page *page = damon_get_page(PHYS_PFN(paddr)); struct rmap_walk_control rwc = { .rmap_one = __damon_pa_mkold, - .anon_lock = page_lock_anon_vma_read, + .anon_lock = folio_lock_anon_vma_read, }; bool need_lock; @@ -54,7 +54,7 @@ static void damon_pa_mkold(unsigned long paddr) if (need_lock && !folio_trylock(folio)) goto out; - rmap_walk(&folio->page, &rwc); + rmap_walk(folio, &rwc); if (need_lock) folio_unlock(folio); @@ -87,10 +87,9 @@ struct damon_pa_access_chk_result { bool accessed; }; -static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma, +static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg) { - struct folio *folio = page_folio(page); struct damon_pa_access_chk_result *result = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); @@ -133,7 +132,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz) struct rmap_walk_control rwc = { .arg = &result, .rmap_one = __damon_pa_young, - .anon_lock = page_lock_anon_vma_read, + .anon_lock = folio_lock_anon_vma_read, }; bool need_lock; @@ -156,7 +155,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz) return NULL; } - rmap_walk(&folio->page, &rwc); + rmap_walk(folio, &rwc); if (need_lock) folio_unlock(folio); diff --git a/mm/folio-compat.c b/mm/folio-compat.c index 968ad97bbffa..46fa179e32fb 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -164,10 +164,3 @@ void putback_lru_page(struct page *page) { folio_putback_lru(page_folio(page)); } - -#ifdef CONFIG_MMU -struct anon_vma *page_lock_anon_vma_read(struct page *page) -{ - return folio_lock_anon_vma_read(page_folio(page)); -} -#endif diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d55b25f1ceba..d874d50e703b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2572,7 +2572,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) * The caller does not necessarily hold an mmap_lock that would * prevent the anon_vma disappearing so we first we take a * reference to it and then lock the anon_vma for write. 
This - * is similar to page_lock_anon_vma_read except the write lock + * is similar to folio_lock_anon_vma_read except the write lock * is taken to serialise against parallel split or collapse * operations. */ diff --git a/mm/ksm.c b/mm/ksm.c index b25d545e0cd1..dd737f925c04 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2588,21 +2588,21 @@ struct page *ksm_might_need_to_copy(struct page *page, return new_page; } -void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) +void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) { struct stable_node *stable_node; struct rmap_item *rmap_item; int search_new_forks = 0; - VM_BUG_ON_PAGE(!PageKsm(page), page); + VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio); /* * Rely on the page lock to protect against concurrent modifications * to that page's node of the stable tree. */ - VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); - stable_node = page_stable_node(page); + stable_node = folio_stable_node(folio); if (!stable_node) return; again: @@ -2637,11 +2637,11 @@ again: if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; - if (!rwc->rmap_one(page, vma, addr, rwc->arg)) { + if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { anon_vma_unlock_read(anon_vma); return; } - if (rwc->done && rwc->done(page)) { + if (rwc->done && rwc->done(folio)) { anon_vma_unlock_read(anon_vma); return; } diff --git a/mm/migrate.c b/mm/migrate.c index eba3cd5376e3..2defe9aa4d0e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -171,13 +171,11 @@ void putback_movable_pages(struct list_head *l) /* * Restore a potential migration pte to a working pte entry */ -static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, - unsigned long addr, void *old) +static bool remove_migration_pte(struct folio *folio, + struct vm_area_struct *vma, unsigned long addr, void *old) { - struct folio *folio = page_folio(page); DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); - VM_BUG_ON_PAGE(PageTail(page), page); while (page_vma_mapped_walk(&pvmw)) { pte_t pte; swp_entry_t entry; @@ -269,9 +267,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) }; if (locked) - rmap_walk_locked(&dst->page, &rwc); + rmap_walk_locked(dst, &rwc); else - rmap_walk(&dst->page, &rwc); + rmap_walk(dst, &rwc); } /* diff --git a/mm/page_idle.c b/mm/page_idle.c index 5c73a9b578da..e34ba04e22e2 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -46,11 +46,10 @@ static struct page *page_idle_get_page(unsigned long pfn) return page; } -static bool page_idle_clear_pte_refs_one(struct page *page, +static bool page_idle_clear_pte_refs_one(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg) { - struct folio *folio = page_folio(page); DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); bool referenced = false; @@ -93,7 +92,7 @@ static void page_idle_clear_pte_refs(struct page *page) */ static const struct rmap_walk_control rwc = { .rmap_one = page_idle_clear_pte_refs_one, - .anon_lock = page_lock_anon_vma_read, + .anon_lock = folio_lock_anon_vma_read, }; bool need_lock; @@ -104,7 +103,7 @@ static void page_idle_clear_pte_refs(struct page *page) if (need_lock && !folio_trylock(folio)) return; - rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc); + rmap_walk(folio, (struct rmap_walk_control *)&rwc); if (need_lock) folio_unlock(folio); diff --git a/mm/rmap.c b/mm/rmap.c index 09301aecf2fc..2cc97c64901e 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ 
-107,15 +107,15 @@ static inline void anon_vma_free(struct anon_vma *anon_vma) VM_BUG_ON(atomic_read(&anon_vma->refcount)); /* - * Synchronize against page_lock_anon_vma_read() such that + * Synchronize against folio_lock_anon_vma_read() such that * we can safely hold the lock without the anon_vma getting * freed. * * Relies on the full mb implied by the atomic_dec_and_test() from * put_anon_vma() against the acquire barrier implied by - * down_read_trylock() from page_lock_anon_vma_read(). This orders: + * down_read_trylock() from folio_lock_anon_vma_read(). This orders: * - * page_lock_anon_vma_read() VS put_anon_vma() + * folio_lock_anon_vma_read() VS put_anon_vma() * down_read_trylock() atomic_dec_and_test() * LOCK MB * atomic_read() rwsem_is_locked() @@ -168,7 +168,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma, * allocate a new one. * * Anon-vma allocations are very subtle, because we may have - * optimistically looked up an anon_vma in page_lock_anon_vma_read() + * optimistically looked up an anon_vma in folio_lock_anon_vma_read() * and that may actually touch the rwsem even in the newly * allocated vma (it depends on RCU to make sure that the * anon_vma isn't actually destroyed). @@ -799,10 +799,9 @@ struct folio_referenced_arg { /* * arg: folio_referenced_arg will be passed */ -static bool folio_referenced_one(struct page *page, struct vm_area_struct *vma, - unsigned long address, void *arg) +static bool folio_referenced_one(struct folio *folio, + struct vm_area_struct *vma, unsigned long address, void *arg) { - struct folio *folio = page_folio(page); struct folio_referenced_arg *pra = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); int referenced = 0; @@ -894,7 +893,7 @@ int folio_referenced(struct folio *folio, int is_locked, struct rmap_walk_control rwc = { .rmap_one = folio_referenced_one, .arg = (void *)&pra, - .anon_lock = page_lock_anon_vma_read, + .anon_lock = folio_lock_anon_vma_read, }; *vm_flags = 0; @@ -919,7 +918,7 @@ int folio_referenced(struct folio *folio, int is_locked, rwc.invalid_vma = invalid_folio_referenced_vma; } - rmap_walk(&folio->page, &rwc); + rmap_walk(folio, &rwc); *vm_flags = pra.vm_flags; if (we_locked) @@ -928,10 +927,9 @@ int folio_referenced(struct folio *folio, int is_locked, return pra.referenced; } -static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, +static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { - struct folio *folio = page_folio(page); DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); struct mmu_notifier_range range; int *cleaned = arg; @@ -1025,7 +1023,7 @@ int folio_mkclean(struct folio *folio) if (!mapping) return 0; - rmap_walk(&folio->page, &rwc); + rmap_walk(folio, &rwc); return cleaned; } @@ -1410,10 +1408,9 @@ out: /* * @arg: enum ttu_flags will be passed to this argument */ -static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, +static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { - struct folio *folio = page_folio(page); struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; @@ -1667,9 +1664,9 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) return vma_is_temporary_stack(vma); } -static int page_not_mapped(struct page *page) +static int page_not_mapped(struct folio *folio) { - return !page_mapped(page); + return !folio_mapped(folio); } /** @@ -1689,13 
+1686,13 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags) .rmap_one = try_to_unmap_one, .arg = (void *)flags, .done = page_not_mapped, - .anon_lock = page_lock_anon_vma_read, + .anon_lock = folio_lock_anon_vma_read, }; if (flags & TTU_RMAP_LOCKED) - rmap_walk_locked(&folio->page, &rwc); + rmap_walk_locked(folio, &rwc); else - rmap_walk(&folio->page, &rwc); + rmap_walk(folio, &rwc); } /* @@ -1704,10 +1701,9 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags) * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs * containing migration entries. */ -static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, +static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { - struct folio *folio = page_folio(page); struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; @@ -1951,7 +1947,7 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags) .rmap_one = try_to_migrate_one, .arg = (void *)flags, .done = page_not_mapped, - .anon_lock = page_lock_anon_vma_read, + .anon_lock = folio_lock_anon_vma_read, }; /* @@ -1977,9 +1973,9 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags) rwc.invalid_vma = invalid_migration_vma; if (flags & TTU_RMAP_LOCKED) - rmap_walk_locked(&folio->page, &rwc); + rmap_walk_locked(folio, &rwc); else - rmap_walk(&folio->page, &rwc); + rmap_walk(folio, &rwc); } #ifdef CONFIG_DEVICE_PRIVATE @@ -1990,10 +1986,9 @@ struct make_exclusive_args { bool valid; }; -static bool page_make_device_exclusive_one(struct page *page, +static bool page_make_device_exclusive_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *priv) { - struct folio *folio = page_folio(page); struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); struct make_exclusive_args *args = priv; @@ -2098,7 +2093,7 @@ static bool folio_make_device_exclusive(struct folio *folio, struct rmap_walk_control rwc = { .rmap_one = page_make_device_exclusive_one, .done = page_not_mapped, - .anon_lock = page_lock_anon_vma_read, + .anon_lock = folio_lock_anon_vma_read, .arg = &args, }; @@ -2109,7 +2104,7 @@ static bool folio_make_device_exclusive(struct folio *folio, if (!folio_test_anon(folio)) return false; - rmap_walk(&folio->page, &rwc); + rmap_walk(folio, &rwc); return args.valid && !folio_mapcount(folio); } @@ -2177,17 +2172,16 @@ void __put_anon_vma(struct anon_vma *anon_vma) anon_vma_free(root); } -static struct anon_vma *rmap_walk_anon_lock(struct page *page, +static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, struct rmap_walk_control *rwc) { - struct folio *folio = page_folio(page); struct anon_vma *anon_vma; if (rwc->anon_lock) - return rwc->anon_lock(page); + return rwc->anon_lock(folio); /* - * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() + * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() * because that depends on page_mapped(); but not all its usages * are holding mmap_lock. Users without mmap_lock are required to * take a reference count to prevent the anon_vma disappearing @@ -2209,10 +2203,9 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the anon_vma struct it points to. 
*/ -static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, +static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { - struct folio *folio = page_folio(page); struct anon_vma *anon_vma; pgoff_t pgoff_start, pgoff_end; struct anon_vma_chain *avc; @@ -2222,17 +2215,17 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, /* anon_vma disappear under us? */ VM_BUG_ON_FOLIO(!anon_vma, folio); } else { - anon_vma = rmap_walk_anon_lock(page, rwc); + anon_vma = rmap_walk_anon_lock(folio, rwc); } if (!anon_vma) return; - pgoff_start = page_to_pgoff(page); - pgoff_end = pgoff_start + thp_nr_pages(page) - 1; + pgoff_start = folio_pgoff(folio); + pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff_start, pgoff_end) { struct vm_area_struct *vma = avc->vma; - unsigned long address = vma_address(page, vma); + unsigned long address = vma_address(&folio->page, vma); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); @@ -2240,9 +2233,9 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; - if (!rwc->rmap_one(page, vma, address, rwc->arg)) + if (!rwc->rmap_one(folio, vma, address, rwc->arg)) break; - if (rwc->done && rwc->done(page)) + if (rwc->done && rwc->done(folio)) break; } @@ -2258,10 +2251,10 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the address_space struct it points to. */ -static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, +static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { - struct address_space *mapping = page_mapping(page); + struct address_space *mapping = folio_mapping(folio); pgoff_t pgoff_start, pgoff_end; struct vm_area_struct *vma; @@ -2271,18 +2264,18 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, * structure at mapping cannot be freed and reused yet, * so we can safely take mapping->i_mmap_rwsem. 
*/ - VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (!mapping) return; - pgoff_start = page_to_pgoff(page); - pgoff_end = pgoff_start + thp_nr_pages(page) - 1; + pgoff_start = folio_pgoff(folio); + pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; if (!locked) i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) { - unsigned long address = vma_address(page, vma); + unsigned long address = vma_address(&folio->page, vma); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); @@ -2290,9 +2283,9 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; - if (!rwc->rmap_one(page, vma, address, rwc->arg)) + if (!rwc->rmap_one(folio, vma, address, rwc->arg)) goto done; - if (rwc->done && rwc->done(page)) + if (rwc->done && rwc->done(folio)) goto done; } @@ -2301,25 +2294,25 @@ done: i_mmap_unlock_read(mapping); } -void rmap_walk(struct page *page, struct rmap_walk_control *rwc) +void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) { - if (unlikely(PageKsm(page))) - rmap_walk_ksm(page, rwc); - else if (PageAnon(page)) - rmap_walk_anon(page, rwc, false); + if (unlikely(folio_test_ksm(folio))) + rmap_walk_ksm(folio, rwc); + else if (folio_test_anon(folio)) + rmap_walk_anon(folio, rwc, false); else - rmap_walk_file(page, rwc, false); + rmap_walk_file(folio, rwc, false); } /* Like rmap_walk, but caller holds relevant rmap lock */ -void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) +void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) { /* no ksm support for now */ - VM_BUG_ON_PAGE(PageKsm(page), page); - if (PageAnon(page)) - rmap_walk_anon(page, rwc, true); + VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); + if (folio_test_anon(folio)) + rmap_walk_anon(folio, rwc, true); else - rmap_walk_file(page, rwc, true); + rmap_walk_file(folio, rwc, true); } #ifdef CONFIG_HUGETLB_PAGE -- cgit v1.2.3-71-gd317 From 84fbbe21894bb9be8e16df408cbfbb9466fe396d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 29 Jan 2022 16:16:54 -0500 Subject: mm/rmap: Constify the rmap_walk_control argument The rmap walking functions do not modify the rmap_walk_control, and page_idle_clear_pte_refs() takes advantage of that to move construction of the rmap_walk_control to compile time. This lets us remove an unclean cast. 
Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/ksm.h | 4 ++-- include/linux/rmap.h | 4 ++-- mm/ksm.c | 2 +- mm/page_idle.c | 2 +- mm/rmap.c | 14 +++++++------- 5 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 0b4f17418f64..0630e545f4cb 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm) struct page *ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address); -void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc); +void rmap_walk_ksm(struct folio *folio, const struct rmap_walk_control *rwc); void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); #else /* !CONFIG_KSM */ @@ -79,7 +79,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page, } static inline void rmap_walk_ksm(struct folio *folio, - struct rmap_walk_control *rwc) + const struct rmap_walk_control *rwc) { } diff --git a/include/linux/rmap.h b/include/linux/rmap.h index a74d811c5b3f..17230c458341 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -292,8 +292,8 @@ struct rmap_walk_control { bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); }; -void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc); -void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc); +void rmap_walk(struct folio *folio, const struct rmap_walk_control *rwc); +void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc); #else /* !CONFIG_MMU */ diff --git a/mm/ksm.c b/mm/ksm.c index dd737f925c04..eed2ff25a2fb 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2588,7 +2588,7 @@ struct page *ksm_might_need_to_copy(struct page *page, return new_page; } -void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) +void rmap_walk_ksm(struct folio *folio, const struct rmap_walk_control *rwc) { struct stable_node *stable_node; struct rmap_item *rmap_item; diff --git a/mm/page_idle.c b/mm/page_idle.c index e34ba04e22e2..fc0435abf909 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -103,7 +103,7 @@ static void page_idle_clear_pte_refs(struct page *page) if (need_lock && !folio_trylock(folio)) return; - rmap_walk(folio, (struct rmap_walk_control *)&rwc); + rmap_walk(folio, &rwc); if (need_lock) folio_unlock(folio); diff --git a/mm/rmap.c b/mm/rmap.c index 2cc97c64901e..8192cb5809bc 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2173,7 +2173,7 @@ void __put_anon_vma(struct anon_vma *anon_vma) } static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, - struct rmap_walk_control *rwc) + const struct rmap_walk_control *rwc) { struct anon_vma *anon_vma; @@ -2203,8 +2203,8 @@ static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the anon_vma struct it points to. */ -static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc, - bool locked) +static void rmap_walk_anon(struct folio *folio, + const struct rmap_walk_control *rwc, bool locked) { struct anon_vma *anon_vma; pgoff_t pgoff_start, pgoff_end; @@ -2251,8 +2251,8 @@ static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc, * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the address_space struct it points to. 
*/ -static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc, - bool locked) +static void rmap_walk_file(struct folio *folio, + const struct rmap_walk_control *rwc, bool locked) { struct address_space *mapping = folio_mapping(folio); pgoff_t pgoff_start, pgoff_end; @@ -2294,7 +2294,7 @@ done: i_mmap_unlock_read(mapping); } -void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) +void rmap_walk(struct folio *folio, const struct rmap_walk_control *rwc) { if (unlikely(folio_test_ksm(folio))) rmap_walk_ksm(folio, rwc); @@ -2305,7 +2305,7 @@ void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) } /* Like rmap_walk, but caller holds relevant rmap lock */ -void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) +void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc) { /* no ksm support for now */ VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); -- cgit v1.2.3-71-gd317 From d4b4084ac3154c51ff5fa71f669264cc44429be2 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 4 Feb 2022 14:13:31 -0500 Subject: mm: Turn can_split_huge_page() into can_split_folio() This function already required a head page to be passed, so this just adds type-safety and removes a few implicit calls to compound_head(). Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/huge_mm.h | 4 ++-- mm/huge_memory.c | 15 ++++++++------- mm/vmscan.c | 6 +++--- 3 files changed, 13 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 4368b314d9c8..e0348bca3d66 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -185,7 +185,7 @@ void prep_transhuge_page(struct page *page); void free_transhuge_page(struct page *page); bool is_transparent_hugepage(struct page *page); -bool can_split_huge_page(struct page *page, int *pextra_pins); +bool can_split_folio(struct folio *folio, int *pextra_pins); int split_huge_page_to_list(struct page *page, struct list_head *list); static inline int split_huge_page(struct page *page) { @@ -387,7 +387,7 @@ static inline bool is_transparent_hugepage(struct page *page) #define thp_get_unmapped_area NULL static inline bool -can_split_huge_page(struct page *page, int *pextra_pins) +can_split_folio(struct folio *folio, int *pextra_pins) { BUILD_BUG(); return false; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d874d50e703b..38e233a7d977 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2516,18 +2516,19 @@ int page_trans_huge_mapcount(struct page *page) } /* Racy check whether the huge page can be split */ -bool can_split_huge_page(struct page *page, int *pextra_pins) +bool can_split_folio(struct folio *folio, int *pextra_pins) { int extra_pins; /* Additional pins from page cache */ - if (PageAnon(page)) - extra_pins = PageSwapCache(page) ? thp_nr_pages(page) : 0; + if (folio_test_anon(folio)) + extra_pins = folio_test_swapcache(folio) ? 
+ folio_nr_pages(folio) : 0; else - extra_pins = thp_nr_pages(page); + extra_pins = folio_nr_pages(folio); if (pextra_pins) *pextra_pins = extra_pins; - return total_mapcount(page) == page_count(page) - extra_pins - 1; + return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1; } /* @@ -2619,7 +2620,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) * Racy check if we can split the page, before unmap_page() will * split PMDs */ - if (!can_split_huge_page(head, &extra_pins)) { + if (!can_split_folio(folio, &extra_pins)) { ret = -EBUSY; goto out_unlock; } @@ -2928,7 +2929,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, goto next; total++; - if (!can_split_huge_page(compound_head(page), NULL)) + if (!can_split_folio(page_folio(page), NULL)) goto next; if (!trylock_page(page)) diff --git a/mm/vmscan.c b/mm/vmscan.c index 32473e069f68..7db5d0237333 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1703,18 +1703,18 @@ retry: if (!PageSwapCache(page)) { if (!(sc->gfp_mask & __GFP_IO)) goto keep_locked; - if (page_maybe_dma_pinned(page)) + if (folio_maybe_dma_pinned(folio)) goto keep_locked; if (PageTransHuge(page)) { /* cannot split THP, skip it */ - if (!can_split_huge_page(page, NULL)) + if (!can_split_folio(folio, NULL)) goto activate_locked; /* * Split pages without a PMD map right * away. Chances are some or all of the * tail pages can be freed without IO. */ - if (!compound_mapcount(page) && + if (!folio_entire_mapcount(folio) && split_folio_to_list(folio, page_list)) goto activate_locked; -- cgit v1.2.3-71-gd317 From 06d44142d49dc2e02d255ea9d72dc4c20f20388f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 10 Oct 2020 11:47:55 -0400 Subject: mm: Fix READ_ONLY_THP warning These counters only exist if CONFIG_READ_ONLY_THP_FOR_FS is defined, but we do not need to warn if the filesystem natively supports large folios. Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/pagemap.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a73c928e1d74..0a2417fc531c 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -326,7 +326,7 @@ static inline void filemap_nr_thps_inc(struct address_space *mapping) if (!mapping_large_folio_support(mapping)) atomic_inc(&mapping->nr_thps); #else - WARN_ON_ONCE(1); + WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } @@ -336,7 +336,7 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping) if (!mapping_large_folio_support(mapping)) atomic_dec(&mapping->nr_thps); #else - WARN_ON_ONCE(1); + WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } -- cgit v1.2.3-71-gd317 From 421f1ab48452af48b64e205de1caca3d1ba415f4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 15 Jan 2022 23:27:08 -0500 Subject: mm: Make large folios depend on THP Some parts of the VM still depend on THP to handle large folios correctly. Until those are fixed, prevent creating large folios if THP are disabled. 
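The opt-in side of this check is mapping_set_large_folios(); a minimal sketch of how a converted filesystem would use it (the filesystem name and function here are hypothetical):

/* Hypothetical example: called while setting up a new inode's mapping. */
static void examplefs_set_inode_ops(struct inode *inode)
{
	mapping_set_large_folios(inode->i_mapping);
}

With this patch, mapping_large_folio_support() additionally requires CONFIG_TRANSPARENT_HUGEPAGE, so the opt-in above becomes a no-op on kernels built without THP.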
Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/pagemap.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 0a2417fc531c..20d7cbabf654 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -306,9 +306,14 @@ static inline void mapping_set_large_folios(struct address_space *mapping) __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); } +/* + * Large folio support currently depends on THP. These dependencies are + * being worked on but are not yet fixed. + */ static inline bool mapping_large_folio_support(struct address_space *mapping) { - return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); + return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); } static inline int filemap_nr_thps(struct address_space *mapping) -- cgit v1.2.3-71-gd317 From 18788cfa236967741b83db1035ab24539e2a21bb Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 May 2020 20:54:38 -0400 Subject: mm: Support arbitrary THP sizes For code which has not yet been converted from THP to folios, use the compound size of the page instead of assuming PTE or PMD size. Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/huge_mm.h | 47 ----------------------------------------------- include/linux/mm.h | 31 +++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index e0348bca3d66..0734aff8fa19 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -250,30 +250,6 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, return NULL; } -/** - * thp_order - Order of a transparent huge page. - * @page: Head page of a transparent huge page. - */ -static inline unsigned int thp_order(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - if (PageHead(page)) - return HPAGE_PMD_ORDER; - return 0; -} - -/** - * thp_nr_pages - The number of regular pages in this huge page. - * @page: The head page of a huge page. - */ -static inline int thp_nr_pages(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - if (PageHead(page)) - return HPAGE_PMD_NR; - return 1; -} - /** * folio_test_pmd_mappable - Can we map this folio with a PMD? * @folio: The folio to test @@ -336,18 +312,6 @@ static inline struct list_head *page_deferred_list(struct page *page) #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) -static inline unsigned int thp_order(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - return 0; -} - -static inline int thp_nr_pages(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - return 1; -} - static inline bool folio_test_pmd_mappable(struct folio *folio) { return false; @@ -489,15 +453,4 @@ static inline int split_folio_to_list(struct folio *folio, return split_huge_page_to_list(&folio->page, list); } -/** - * thp_size - Size of a transparent huge page. - * @page: Head page of a transparent huge page. - * - * Return: Number of bytes in this page. 
- */ -static inline unsigned long thp_size(struct page *page) -{ - return PAGE_SIZE << thp_order(page); -} - #endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h index a879c583f665..c1966ad34142 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -939,6 +939,37 @@ static inline unsigned int page_shift(struct page *page) return PAGE_SHIFT + compound_order(page); } +/** + * thp_order - Order of a transparent huge page. + * @page: Head page of a transparent huge page. + */ +static inline unsigned int thp_order(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return compound_order(page); +} + +/** + * thp_nr_pages - The number of regular pages in this huge page. + * @page: The head page of a huge page. + */ +static inline int thp_nr_pages(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return compound_nr(page); +} + +/** + * thp_size - Size of a transparent huge page. + * @page: Head page of a transparent huge page. + * + * Return: Number of bytes in this page. + */ +static inline unsigned long thp_size(struct page *page) +{ + return PAGE_SIZE << thp_order(page); +} + void free_compound_page(struct page *page); #ifdef CONFIG_MMU -- cgit v1.2.3-71-gd317
From 351bdbb6419ca988802882badadc321d384d0254 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 21 Mar 2022 10:22:37 +0100 Subject: net: Revert the softirq will run annotation in ____napi_schedule().
The lockdep annotation lockdep_assert_softirq_will_run() expects that either hard or soft interrupts are disabled because both guarantee that the "raised" soft-interrupts will be processed once the context is left. This triggers in flush_smp_call_function_from_idle(), but in this case it explicitly calls do_softirq() in case of pending softirqs.
Revert the "softirq will run" annotation in ____napi_schedule() and move the check back to __netif_rx() as it was. Keep the IRQ-off assert in ____napi_schedule() because this is always required.
Fixes: fbd9a2ceba5c7 ("net: Add lockdep asserts to ____napi_schedule().") Signed-off-by: Sebastian Andrzej Siewior Reviewed-by: Jason A. Donenfeld Link: https://lore.kernel.org/r/YjhD3ZKWysyw8rc6@linutronix.de Signed-off-by: Jakub Kicinski --- include/linux/lockdep.h | 7 ------- net/core/dev.c | 3 +-- 2 files changed, 1 insertion(+), 9 deletions(-) (limited to 'include/linux')
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 0cc65d216701..467b94257105 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -329,12 +329,6 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #define lockdep_assert_none_held_once() \ lockdep_assert_once(!current->lockdep_depth) -/* - * Ensure that softirq is handled within the callchain and not delayed and - * handled by chance.
- */ -#define lockdep_assert_softirq_will_run() \ - lockdep_assert_once(hardirq_count() | softirq_count()) #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) @@ -420,7 +414,6 @@ extern int lockdep_is_held(const void *); #define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) #define lockdep_assert_none_held_once() do { } while (0) -#define lockdep_assert_softirq_will_run() do { } while (0) #define lockdep_recursing(tsk) (0)
diff --git a/net/core/dev.c b/net/core/dev.c index 8e0cc5f2020d..8a5109479dbe 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4277,7 +4277,6 @@ static inline void ____napi_schedule(struct softnet_data *sd, { struct task_struct *thread; - lockdep_assert_softirq_will_run(); lockdep_assert_irqs_disabled(); if (test_bit(NAPI_STATE_THREADED, &napi->state)) { @@ -4887,7 +4886,7 @@ int __netif_rx(struct sk_buff *skb) { int ret; - lockdep_assert_softirq_will_run(); + lockdep_assert_once(hardirq_count() | softirq_count()); trace_netif_rx_entry(skb); ret = netif_rx_internal(skb); -- cgit v1.2.3-71-gd317
From f5bfa23f576fdcc8e0b7fbff44cf70bd69ff9bdb Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Fri, 18 Feb 2022 16:46:54 -0800 Subject: RISC-V: Add a perf core library for pmu drivers
Implement a perf core library that can support all the essential perf features in the future. It can also accommodate any type of PMU implementation in the future. Currently, both the SBI-based perf driver and the legacy driver use the library.
Most of the common perf functionality is kept in this core library while a PMU specific driver can implement PMU specific features. For example, the SBI specific functionality will be implemented in the SBI specific driver.
Reviewed-by: Anup Patel Signed-off-by: Atish Patra Signed-off-by: Atish Patra Signed-off-by: Palmer Dabbelt --- drivers/perf/Kconfig | 10 ++ drivers/perf/Makefile | 1 + drivers/perf/riscv_pmu.c | 322 +++++++++++++++++++++++++++++++++++++++++ include/linux/perf/riscv_pmu.h | 65 +++++++++ 4 files changed, 398 insertions(+) create mode 100644 drivers/perf/riscv_pmu.c create mode 100644 include/linux/perf/riscv_pmu.h (limited to 'include/linux')
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index e1a0c44bc686..dbc0e3f98be9 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -56,6 +56,16 @@ config ARM_PMU Say y if you want to use CPU performance monitors on ARM-based systems. +config RISCV_PMU + depends on RISCV + bool "RISC-V PMU framework" + default y + help + Say y if you want to use CPU performance monitors on RISCV-based + systems. This provides the core PMU framework that abstracts common + PMU functionalities in a core library so that different PMU drivers + can reuse it.
+ config ARM_PMU_ACPI depends on ARM_PMU && ACPI def_bool y diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 2db5418d5b0a..09082dea154b 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o obj-$(CONFIG_HISI_PMU) += hisilicon/ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o +obj-$(CONFIG_RISCV_PMU) += riscv_pmu.o obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c new file mode 100644 index 000000000000..590a5789c128 --- /dev/null +++ b/drivers/perf/riscv_pmu.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RISC-V performance counter support. + * + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * + * This implementation is based on old RISC-V perf and ARM perf event code + * which are in turn based on sparc64 and x86 code. + */ + +#include +#include +#include +#include +#include +#include + +static unsigned long csr_read_num(int csr_num) +{ +#define switchcase_csr_read(__csr_num, __val) {\ + case __csr_num: \ + __val = csr_read(__csr_num); \ + break; } +#define switchcase_csr_read_2(__csr_num, __val) {\ + switchcase_csr_read(__csr_num + 0, __val) \ + switchcase_csr_read(__csr_num + 1, __val)} +#define switchcase_csr_read_4(__csr_num, __val) {\ + switchcase_csr_read_2(__csr_num + 0, __val) \ + switchcase_csr_read_2(__csr_num + 2, __val)} +#define switchcase_csr_read_8(__csr_num, __val) {\ + switchcase_csr_read_4(__csr_num + 0, __val) \ + switchcase_csr_read_4(__csr_num + 4, __val)} +#define switchcase_csr_read_16(__csr_num, __val) {\ + switchcase_csr_read_8(__csr_num + 0, __val) \ + switchcase_csr_read_8(__csr_num + 8, __val)} +#define switchcase_csr_read_32(__csr_num, __val) {\ + switchcase_csr_read_16(__csr_num + 0, __val) \ + switchcase_csr_read_16(__csr_num + 16, __val)} + + unsigned long ret = 0; + + switch (csr_num) { + switchcase_csr_read_32(CSR_CYCLE, ret) + switchcase_csr_read_32(CSR_CYCLEH, ret) + default : + break; + } + + return ret; +#undef switchcase_csr_read_32 +#undef switchcase_csr_read_16 +#undef switchcase_csr_read_8 +#undef switchcase_csr_read_4 +#undef switchcase_csr_read_2 +#undef switchcase_csr_read +} + +/* + * Read the CSR of a corresponding counter. + */ +unsigned long riscv_pmu_ctr_read_csr(unsigned long csr) +{ + if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H || + (csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) { + pr_err("Invalid performance counter csr %lx\n", csr); + return -EINVAL; + } + + return csr_read_num(csr); +} + +u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event) +{ + int cwidth; + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (!rvpmu->ctr_get_width) + /** + * If the pmu driver doesn't support counter width, set it to default + * maximum allowed by the specification. 
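+ * (A width of 63 produces a full 64-bit counter mask from GENMASK_ULL() below.)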
+ */ + cwidth = 63; + else { + if (hwc->idx == -1) + /* Handle init case where idx is not initialized yet */ + cwidth = rvpmu->ctr_get_width(0); + else + cwidth = rvpmu->ctr_get_width(hwc->idx); + } + + return GENMASK_ULL(cwidth, 0); +} + +u64 riscv_pmu_event_update(struct perf_event *event) +{ + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 prev_raw_count, new_raw_count; + unsigned long cmask; + u64 oldval, delta; + + if (!rvpmu->ctr_read) + return 0; + + cmask = riscv_pmu_ctr_get_width_mask(event); + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = rvpmu->ctr_read(event); + oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count); + } while (oldval != prev_raw_count); + + delta = (new_raw_count - prev_raw_count) & cmask; + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return delta; +} + +static void riscv_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + + if (!(hwc->state & PERF_HES_STOPPED)) { + if (rvpmu->ctr_stop) { + rvpmu->ctr_stop(event, 0); + hwc->state |= PERF_HES_STOPPED; + } + riscv_pmu_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +int riscv_pmu_event_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int overflow = 0; + uint64_t max_period = riscv_pmu_ctr_get_width_mask(event); + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } + + /* + * Limit the maximum period to prevent the counter value + * from overtaking the one we are about to program. In + * effect we are reducing max_period to account for + * interrupt latency (and we are being very conservative). + */ + if (left > (max_period >> 1)) + left = (max_period >> 1); + + local64_set(&hwc->prev_count, (u64)-left); + perf_event_update_userpage(event); + + return overflow; +} + +static void riscv_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); + uint64_t max_period = riscv_pmu_ctr_get_width_mask(event); + u64 init_val; + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + hwc->state = 0; + riscv_pmu_event_set_period(event); + init_val = local64_read(&hwc->prev_count) & max_period; + rvpmu->ctr_start(event, init_val); + perf_event_update_userpage(event); +} + +static int riscv_pmu_add(struct perf_event *event, int flags) +{ + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); + struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + + idx = rvpmu->ctr_get_idx(event); + if (idx < 0) + return idx; + + hwc->idx = idx; + cpuc->events[idx] = event; + cpuc->n_events++; + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (flags & PERF_EF_START) + riscv_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. 
*/ + perf_event_update_userpage(event); + + return 0; +} + +static void riscv_pmu_del(struct perf_event *event, int flags) +{ + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); + struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); + struct hw_perf_event *hwc = &event->hw; + + riscv_pmu_stop(event, PERF_EF_UPDATE); + cpuc->events[hwc->idx] = NULL; + /* The firmware need to reset the counter mapping */ + if (rvpmu->ctr_stop) + rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET); + cpuc->n_events--; + if (rvpmu->ctr_clear_idx) + rvpmu->ctr_clear_idx(event); + perf_event_update_userpage(event); + hwc->idx = -1; +} + +static void riscv_pmu_read(struct perf_event *event) +{ + riscv_pmu_event_update(event); +} + +static int riscv_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); + int mapped_event; + u64 event_config = 0; + uint64_t cmask; + + hwc->flags = 0; + mapped_event = rvpmu->event_map(event, &event_config); + if (mapped_event < 0) { + pr_debug("event %x:%llx not supported\n", event->attr.type, + event->attr.config); + return mapped_event; + } + + /* + * idx is set to -1 because the index of a general event should not be + * decided until binding to some counter in pmu->add(). + * config will contain the information about counter CSR + * the idx will contain the counter index + */ + hwc->config = event_config; + hwc->idx = -1; + hwc->event_base = mapped_event; + + if (!is_sampling_event(event)) { + /* + * For non-sampling runs, limit the sample_period to half + * of the counter width. That way, the new counter value + * is far less likely to overtake the previous one unless + * you have some serious IRQ latency issues. + */ + cmask = riscv_pmu_ctr_get_width_mask(event); + hwc->sample_period = cmask >> 1; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + return 0; +} + +struct riscv_pmu *riscv_pmu_alloc(void) +{ + struct riscv_pmu *pmu; + int cpuid, i; + struct cpu_hw_events *cpuc; + + pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); + if (!pmu) + goto out; + + pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL); + if (!pmu->hw_events) { + pr_info("failed to allocate per-cpu PMU data.\n"); + goto out_free_pmu; + } + + for_each_possible_cpu(cpuid) { + cpuc = per_cpu_ptr(pmu->hw_events, cpuid); + cpuc->n_events = 0; + for (i = 0; i < RISCV_MAX_COUNTERS; i++) + cpuc->events[i] = NULL; + } + pmu->pmu = (struct pmu) { + .event_init = riscv_pmu_event_init, + .add = riscv_pmu_add, + .del = riscv_pmu_del, + .start = riscv_pmu_start, + .stop = riscv_pmu_stop, + .read = riscv_pmu_read, + }; + + return pmu; + +out_free_pmu: + kfree(pmu); +out: + return NULL; +} diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h new file mode 100644 index 000000000000..0d8979765d79 --- /dev/null +++ b/include/linux/perf/riscv_pmu.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 SiFive + * Copyright (C) 2018 Andes Technology Corporation + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * + */ + +#ifndef _ASM_RISCV_PERF_EVENT_H +#define _ASM_RISCV_PERF_EVENT_H + +#include +#include +#include + +#ifdef CONFIG_RISCV_PMU + +/* + * The RISCV_MAX_COUNTERS parameter should be specified. 
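+ * It sizes the events[] array and the used_event_ctrs bitmap in
+ * struct cpu_hw_events below.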
+ */ + +#define RISCV_MAX_COUNTERS 64 +#define RISCV_OP_UNSUPP (-EOPNOTSUPP) +#define RISCV_PMU_PDEV_NAME "riscv-pmu" + +#define RISCV_PMU_STOP_FLAG_RESET 1 + +struct cpu_hw_events { + /* currently enabled events */ + int n_events; + /* currently enabled events */ + struct perf_event *events[RISCV_MAX_COUNTERS]; + /* currently enabled counters */ + DECLARE_BITMAP(used_event_ctrs, RISCV_MAX_COUNTERS); +}; + +struct riscv_pmu { + struct pmu pmu; + char *name; + + irqreturn_t (*handle_irq)(int irq_num, void *dev); + + int num_counters; + u64 (*ctr_read)(struct perf_event *event); + int (*ctr_get_idx)(struct perf_event *event); + int (*ctr_get_width)(int idx); + void (*ctr_clear_idx)(struct perf_event *event); + void (*ctr_start)(struct perf_event *event, u64 init_val); + void (*ctr_stop)(struct perf_event *event, unsigned long flag); + int (*event_map)(struct perf_event *event, u64 *config); + + struct cpu_hw_events __percpu *hw_events; + struct hlist_node node; +}; + +#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu)) +unsigned long riscv_pmu_ctr_read_csr(unsigned long csr); +int riscv_pmu_event_set_period(struct perf_event *event); +uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event); +u64 riscv_pmu_event_update(struct perf_event *event); +struct riscv_pmu *riscv_pmu_alloc(void); + +#endif /* CONFIG_RISCV_PMU */ + +#endif /* _ASM_RISCV_PERF_EVENT_H */ -- cgit v1.2.3-71-gd317 From 9b3e150e310ee71d7bae1e31c38a300cfa5e951b Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Fri, 18 Feb 2022 16:46:55 -0800 Subject: RISC-V: Add a simple platform driver for RISC-V legacy perf The old RISC-V perf implementation allowed counting of only cycle/instruction counters using perf. Restore that feature by implementing a simple platform driver under a separate config to provide backward compatibility. Any existing software stack will continue to work as it is. However, it provides an easy way out in future where we can remove the legacy driver. Reviewed-by: Anup Patel Signed-off-by: Atish Patra Signed-off-by: Atish Patra Signed-off-by: Palmer Dabbelt --- drivers/perf/Kconfig | 10 +++ drivers/perf/Makefile | 1 + drivers/perf/riscv_pmu_legacy.c | 142 ++++++++++++++++++++++++++++++++++++++++ include/linux/perf/riscv_pmu.h | 6 ++ 4 files changed, 159 insertions(+) create mode 100644 drivers/perf/riscv_pmu_legacy.c (limited to 'include/linux') diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index dbc0e3f98be9..386162ad858a 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -66,6 +66,16 @@ config RISCV_PMU PMU functionalities in a core library so that different PMU drivers can reuse it. +config RISCV_PMU_LEGACY + depends on RISCV_PMU + bool "RISC-V legacy PMU implementation" + default y + help + Say y if you want to use the legacy CPU performance monitor + implementation on RISC-V based systems. This only allows counting + of cycle/instruction counter and doesn't support counter overflow, + or programmable counters. It will be removed in future. 
+ config ARM_PMU_ACPI depends on ARM_PMU && ACPI def_bool y
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 09082dea154b..c3d3268d495b 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_HISI_PMU) += hisilicon/ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o obj-$(CONFIG_RISCV_PMU) += riscv_pmu.o +obj-$(CONFIG_RISCV_PMU_LEGACY) += riscv_pmu_legacy.o obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c new file mode 100644 index 000000000000..342778782359 --- /dev/null +++ b/drivers/perf/riscv_pmu_legacy.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RISC-V performance counter support. + * + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * + * This implementation is based on old RISC-V perf and ARM perf event code + * which are in turn based on sparc64 and x86 code. + */ + +#include +#include +#include + +#define RISCV_PMU_LEGACY_CYCLE 0 +#define RISCV_PMU_LEGACY_INSTRET 1 +#define RISCV_PMU_LEGACY_NUM_CTR 2 + +static bool pmu_init_done; + +static int pmu_legacy_ctr_get_idx(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + + if (event->attr.type != PERF_TYPE_HARDWARE) + return -EOPNOTSUPP; + if (attr->config == PERF_COUNT_HW_CPU_CYCLES) + return RISCV_PMU_LEGACY_CYCLE; + else if (attr->config == PERF_COUNT_HW_INSTRUCTIONS) + return RISCV_PMU_LEGACY_INSTRET; + else + return -EOPNOTSUPP; +} + +/* For the legacy driver, the config and the counter index are the same */ +static int pmu_legacy_event_map(struct perf_event *event, u64 *config) +{ + return pmu_legacy_ctr_get_idx(event); +} + +static u64 pmu_legacy_read_ctr(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + u64 val; + + if (idx == RISCV_PMU_LEGACY_CYCLE) { + val = riscv_pmu_ctr_read_csr(CSR_CYCLE); + if (IS_ENABLED(CONFIG_32BIT)) + val = (u64)riscv_pmu_ctr_read_csr(CSR_CYCLEH) << 32 | val; + } else if (idx == RISCV_PMU_LEGACY_INSTRET) { + val = riscv_pmu_ctr_read_csr(CSR_INSTRET); + if (IS_ENABLED(CONFIG_32BIT)) + val = ((u64)riscv_pmu_ctr_read_csr(CSR_INSTRETH)) << 32 | val; + } else + return 0; + + return val; +} + +static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival) +{ + struct hw_perf_event *hwc = &event->hw; + u64 initial_val = pmu_legacy_read_ctr(event); + + /** + * The legacy method doesn't really have a start/stop method. + * It also can not update the counter with an initial value. + * But we still need to set the prev_count so that read() can compute + * the delta. Just use the current counter value to set the prev_count. + */ + local64_set(&hwc->prev_count, initial_val); +} + +/** + * This is just a simple implementation to allow legacy implementations to + * remain compatible with the new RISC-V PMU driver framework. + * This driver only allows reading two counters, i.e. CYCLE & INSTRET. + * However, it can not start or stop the counter. Thus, it is not very useful + * and will be removed in the future.
+ */ +static void pmu_legacy_init(struct riscv_pmu *pmu) +{ + pr_info("Legacy PMU implementation is available\n"); + + pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR; + pmu->ctr_start = pmu_legacy_ctr_start; + pmu->ctr_stop = NULL; + pmu->event_map = pmu_legacy_event_map; + pmu->ctr_get_idx = pmu_legacy_ctr_get_idx; + pmu->ctr_get_width = NULL; + pmu->ctr_clear_idx = NULL; + pmu->ctr_read = pmu_legacy_read_ctr; + + perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); +} + +static int pmu_legacy_device_probe(struct platform_device *pdev) +{ + struct riscv_pmu *pmu = NULL; + + pmu = riscv_pmu_alloc(); + if (!pmu) + return -ENOMEM; + pmu_legacy_init(pmu); + + return 0; +} + +static struct platform_driver pmu_legacy_driver = { + .probe = pmu_legacy_device_probe, + .driver = { + .name = RISCV_PMU_LEGACY_PDEV_NAME, + }, +}; + +static int __init riscv_pmu_legacy_devinit(void) +{ + int ret; + struct platform_device *pdev; + + if (likely(pmu_init_done)) + return 0; + + ret = platform_driver_register(&pmu_legacy_driver); + if (ret) + return ret; + + pdev = platform_device_register_simple(RISCV_PMU_LEGACY_PDEV_NAME, -1, NULL, 0); + if (IS_ERR(pdev)) { + platform_driver_unregister(&pmu_legacy_driver); + return PTR_ERR(pdev); + } + + return ret; +} +late_initcall(riscv_pmu_legacy_devinit); + +void riscv_pmu_legacy_skip_init(void) +{ + pmu_init_done = true; +}
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index 0d8979765d79..9140c491fc54 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -22,6 +22,7 @@ #define RISCV_MAX_COUNTERS 64 #define RISCV_OP_UNSUPP (-EOPNOTSUPP) #define RISCV_PMU_PDEV_NAME "riscv-pmu" +#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy" #define RISCV_PMU_STOP_FLAG_RESET 1 @@ -58,6 +59,11 @@ unsigned long riscv_pmu_ctr_read_csr(unsigned long csr); int riscv_pmu_event_set_period(struct perf_event *event); uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event); u64 riscv_pmu_event_update(struct perf_event *event); +#ifdef CONFIG_RISCV_PMU_LEGACY +void riscv_pmu_legacy_skip_init(void); +#else +static inline void riscv_pmu_legacy_skip_init(void) {}; +#endif struct riscv_pmu *riscv_pmu_alloc(void); #endif /* CONFIG_RISCV_PMU */ -- cgit v1.2.3-71-gd317
From e9991434596f5373dfd75857b445eb92a9253c56 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Fri, 18 Feb 2022 16:46:57 -0800 Subject: RISC-V: Add perf platform driver based on SBI PMU extension
The RISC-V SBI specification added a PMU extension that allows configuring, starting and stopping any PMU counter. RISC-V perf can use most of the generic perf features, except counter overflow interrupts and privilege-mode based event filtering, which will be added in the future. It also allows monitoring a handful of firmware counters that can provide insight into firmware activity during a performance analysis.
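For context, a minimal userspace sketch of counting one of those firmware events through this driver (fw_event_id is a placeholder for an SBI firmware event code; the MSB-set raw encoding is the one described in the driver comment at the end of this patch):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

static int open_fw_counter(uint64_t fw_event_id)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		/* Bit 63 selects a firmware event in this driver's raw encoding. */
		.config = (1ULL << 63) | fw_event_id,
		.disabled = 1,
	};

	/* pid 0 = calling task, cpu -1 = any CPU, no group, no flags. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

The returned fd is read with read(2) as usual; under the hood the driver forwards the request to the firmware via the SBI_EXT_PMU_COUNTER_CFG_MATCH ecall shown below.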
Signed-off-by: Atish Patra Signed-off-by: Atish Patra Signed-off-by: Palmer Dabbelt --- drivers/perf/Kconfig | 10 + drivers/perf/Makefile | 1 + drivers/perf/riscv_pmu.c | 2 + drivers/perf/riscv_pmu_sbi.c | 578 +++++++++++++++++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + include/linux/perf/riscv_pmu.h | 6 +- 6 files changed, 596 insertions(+), 2 deletions(-) create mode 100644 drivers/perf/riscv_pmu_sbi.c (limited to 'include/linux')
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 386162ad858a..5645b5615c14 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -76,6 +76,16 @@ config RISCV_PMU_LEGACY of cycle/instruction counter and doesn't support counter overflow, or programmable counters. It will be removed in future. +config RISCV_PMU_SBI + depends on RISCV_PMU && RISCV_SBI + bool "RISC-V PMU based on SBI PMU extension" + default y + help + Say y if you want to use the CPU performance monitor + using SBI PMU extension on RISC-V based systems. This option provides + full perf feature support i.e. counter overflow, privilege mode + filtering, counter configuration. + config ARM_PMU_ACPI depends on ARM_PMU && ACPI def_bool y
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index c3d3268d495b..f149735166e1 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o obj-$(CONFIG_RISCV_PMU) += riscv_pmu.o obj-$(CONFIG_RISCV_PMU_LEGACY) += riscv_pmu_legacy.o +obj-$(CONFIG_RISCV_PMU_SBI) += riscv_pmu_sbi.o obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index 590a5789c128..b2b8d2074ed0 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -15,6 +15,8 @@ #include #include +#include + static unsigned long csr_read_num(int csr_num) { #define switchcase_csr_read(__csr_num, __val) {\
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c new file mode 100644 index 000000000000..815d5c509c64 --- /dev/null +++ b/drivers/perf/riscv_pmu_sbi.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RISC-V performance counter support. + * + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * + * This code is based on ARM perf event code which is in turn based on + * sparc64 and x86 code. + */ + +#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt + +#include +#include +#include + +#include + +union sbi_pmu_ctr_info { + unsigned long value; + struct { + unsigned long csr:12; + unsigned long width:6; +#if __riscv_xlen == 32 + unsigned long reserved:13; +#else + unsigned long reserved:45; +#endif + unsigned long type:1; + }; +}; + +/** + * RISC-V doesn't have heterogeneous harts yet.
This need to be part of + * per_cpu in case of harts with different pmu counters + */ +static union sbi_pmu_ctr_info *pmu_ctr_list; + +struct sbi_pmu_event_data { + union { + union { + struct hw_gen_event { + uint32_t event_code:16; + uint32_t event_type:4; + uint32_t reserved:12; + } hw_gen_event; + struct hw_cache_event { + uint32_t result_id:1; + uint32_t op_id:2; + uint32_t cache_id:13; + uint32_t event_type:4; + uint32_t reserved:12; + } hw_cache_event; + }; + uint32_t event_idx; + }; +}; + +static const struct sbi_pmu_event_data pmu_hw_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = { + SBI_PMU_HW_CPU_CYCLES, + SBI_PMU_EVENT_TYPE_HW, 0}}, + [PERF_COUNT_HW_INSTRUCTIONS] = {.hw_gen_event = { + SBI_PMU_HW_INSTRUCTIONS, + SBI_PMU_EVENT_TYPE_HW, 0}}, + [PERF_COUNT_HW_CACHE_REFERENCES] = {.hw_gen_event = { + SBI_PMU_HW_CACHE_REFERENCES, + SBI_PMU_EVENT_TYPE_HW, 0}}, + [PERF_COUNT_HW_CACHE_MISSES] = {.hw_gen_event = { + SBI_PMU_HW_CACHE_MISSES, + SBI_PMU_EVENT_TYPE_HW, 0}}, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {.hw_gen_event = { + SBI_PMU_HW_BRANCH_INSTRUCTIONS, + SBI_PMU_EVENT_TYPE_HW, 0}}, + [PERF_COUNT_HW_BRANCH_MISSES] = {.hw_gen_event = { + SBI_PMU_HW_BRANCH_MISSES, + SBI_PMU_EVENT_TYPE_HW, 0}}, + [PERF_COUNT_HW_BUS_CYCLES] = {.hw_gen_event = { + SBI_PMU_HW_BUS_CYCLES, + SBI_PMU_EVENT_TYPE_HW, 0}}, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {.hw_gen_event = { + SBI_PMU_HW_STALLED_CYCLES_FRONTEND, + SBI_PMU_EVENT_TYPE_HW, 0}}, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {.hw_gen_event = { + SBI_PMU_HW_STALLED_CYCLES_BACKEND, + SBI_PMU_EVENT_TYPE_HW, 0}}, + [PERF_COUNT_HW_REF_CPU_CYCLES] = {.hw_gen_event = { + SBI_PMU_HW_REF_CPU_CYCLES, + SBI_PMU_EVENT_TYPE_HW, 0}}, +}; + +#define C(x) PERF_COUNT_HW_CACHE_##x +static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX] +[PERF_COUNT_HW_CACHE_OP_MAX] +[PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), C(OP_READ), + C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = 
{C(RESULT_ACCESS), + C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_WRITE), C(NODE), 
SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), + C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), + C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, + }, + }, +}; + +static int pmu_sbi_ctr_get_width(int idx) +{ + return pmu_ctr_list[idx].width; +} + +static bool pmu_sbi_ctr_is_fw(int cidx) +{ + union sbi_pmu_ctr_info *info; + + info = &pmu_ctr_list[cidx]; + if (!info) + return false; + + return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false; +} + +static int pmu_sbi_ctr_get_idx(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); + struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); + struct sbiret ret; + int idx; + uint64_t cbase = 0; + uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0); + unsigned long cflags = 0; + + if (event->attr.exclude_kernel) + cflags |= SBI_PMU_CFG_FLAG_SET_SINH; + if (event->attr.exclude_user) + cflags |= SBI_PMU_CFG_FLAG_SET_UINH; + + /* retrieve the available counter index */ + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask, + cflags, hwc->event_base, hwc->config, 0); + if (ret.error) { + pr_debug("Not able to find a counter for event %lx config %llx\n", + hwc->event_base, hwc->config); + return sbi_err_map_linux_errno(ret.error); + } + + idx = ret.value; + if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value) + return -ENOENT; + + /* Additional sanity check for the counter id */ + if (pmu_sbi_ctr_is_fw(idx)) { + if (!test_and_set_bit(idx, cpuc->used_fw_ctrs)) + return idx; + } else { + if (!test_and_set_bit(idx, cpuc->used_hw_ctrs)) + return idx; + } + + return -ENOENT; +} + +static void pmu_sbi_ctr_clear_idx(struct perf_event *event) +{ + + struct hw_perf_event *hwc = &event->hw; + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); + struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); + int idx = hwc->idx; + + if (pmu_sbi_ctr_is_fw(idx)) + clear_bit(idx, cpuc->used_fw_ctrs); + else + clear_bit(idx, cpuc->used_hw_ctrs); +} + +static int pmu_event_find_cache(u64 config) +{ + unsigned int cache_type, cache_op, cache_result, ret; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx; + + return ret; +} + +static bool pmu_sbi_is_fw_event(struct perf_event *event) +{ + u32 type = event->attr.type; + u64 config = event->attr.config; + + if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1)) + return true; + else + return false; +} + +static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) +{ + u32 type = event->attr.type; + u64 config = event->attr.config; + int bSoftware; + u64 raw_config_val; + int ret; + + switch (type) { + case PERF_TYPE_HARDWARE: + if (config >= PERF_COUNT_HW_MAX) + return -EINVAL; + ret = pmu_hw_event_map[event->attr.config].event_idx; + break; + case PERF_TYPE_HW_CACHE: + ret = pmu_event_find_cache(config); + break; + case PERF_TYPE_RAW: + /* + * As per SBI specification, the upper 16 bits must be unused for + * a raw event. Use the MSB (63b) to distinguish between hardware + * raw event and firmware events. 
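+	 * For example, a raw config of 0x8000000000000001 (MSB set, low
+	 * bits 0x1) selects firmware event 1, while 0x0000000000000023
+	 * programs hardware raw event 0x23 into the counter directly.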
+ */ + bSoftware = config >> 63; + raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK; + if (bSoftware) { + if (raw_config_val < SBI_PMU_FW_MAX) + ret = (raw_config_val & 0xFFFF) | + (SBI_PMU_EVENT_TYPE_FW << 16); + else + return -EINVAL; + } else { + ret = RISCV_PMU_RAW_EVENT_IDX; + *econfig = raw_config_val; + } + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static u64 pmu_sbi_ctr_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + struct sbiret ret; + union sbi_pmu_ctr_info info; + u64 val = 0; + + if (pmu_sbi_is_fw_event(event)) { + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, + hwc->idx, 0, 0, 0, 0, 0); + if (!ret.error) + val = ret.value; + } else { + info = pmu_ctr_list[idx]; + val = riscv_pmu_ctr_read_csr(info.csr); + if (IS_ENABLED(CONFIG_32BIT)) + val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 31 | val; + } + + return val; +} + +static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival) +{ + struct sbiret ret; + struct hw_perf_event *hwc = &event->hw; + unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE; + + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, + 1, flag, ival, ival >> 32, 0); + if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED)) + pr_err("Starting counter idx %d failed with error %d\n", + hwc->idx, sbi_err_map_linux_errno(ret.error)); +} + +static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) +{ + struct sbiret ret; + struct hw_perf_event *hwc = &event->hw; + + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); + if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) && + flag != SBI_PMU_STOP_FLAG_RESET) + pr_err("Stopping counter idx %d failed with error %d\n", + hwc->idx, sbi_err_map_linux_errno(ret.error)); +} + +static int pmu_sbi_find_num_ctrs(void) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0); + if (!ret.error) + return ret.value; + else + return sbi_err_map_linux_errno(ret.error); +} + +static int pmu_sbi_get_ctrinfo(int nctr) +{ + struct sbiret ret; + int i, num_hw_ctr = 0, num_fw_ctr = 0; + union sbi_pmu_ctr_info cinfo; + + pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL); + if (!pmu_ctr_list) + return -ENOMEM; + + for (i = 0; i <= nctr; i++) { + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0); + if (ret.error) + /* The logical counter ids are not expected to be contiguous */ + continue; + cinfo.value = ret.value; + if (cinfo.type == SBI_PMU_CTR_TYPE_FW) + num_fw_ctr++; + else + num_hw_ctr++; + pmu_ctr_list[i].value = cinfo.value; + } + + pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr); + + return 0; +} + +static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node); + + /* Enable the access for TIME csr only from the user mode now */ + csr_write(CSR_SCOUNTEREN, 0x2); + + /* Stop all the counters so that they can be enabled from perf */ + sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, + 0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0); + + return 0; +} + +static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) +{ + /* Disable all counters access for user mode now */ + csr_write(CSR_SCOUNTEREN, 0x0); + + return 0; +} + +static int pmu_sbi_device_probe(struct platform_device *pdev) +{ + struct riscv_pmu *pmu = NULL; + int num_counters; + int 
ret; + + pr_info("SBI PMU extension is available\n"); + pmu = riscv_pmu_alloc(); + if (!pmu) + return -ENOMEM; + + num_counters = pmu_sbi_find_num_ctrs(); + if (num_counters < 0) { + pr_err("SBI PMU extension doesn't provide any counters\n"); + return -ENODEV; + } + + /* cache all the information about counters now */ + if (pmu_sbi_get_ctrinfo(num_counters)) + return -ENODEV; + + pmu->num_counters = num_counters; + pmu->ctr_start = pmu_sbi_ctr_start; + pmu->ctr_stop = pmu_sbi_ctr_stop; + pmu->event_map = pmu_sbi_event_map; + pmu->ctr_get_idx = pmu_sbi_ctr_get_idx; + pmu->ctr_get_width = pmu_sbi_ctr_get_width; + pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx; + pmu->ctr_read = pmu_sbi_ctr_read; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); + if (ret) + return ret; + + ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); + if (ret) { + cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); + return ret; + } + + return 0; +} + +static struct platform_driver pmu_sbi_driver = { + .probe = pmu_sbi_device_probe, + .driver = { + .name = RISCV_PMU_PDEV_NAME, + }, +}; + +static int __init pmu_sbi_devinit(void) +{ + int ret; + struct platform_device *pdev; + + if (sbi_spec_version < sbi_mk_version(0, 3) || + sbi_probe_extension(SBI_EXT_PMU) <= 0) { + return 0; + } + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING, + "perf/riscv/pmu:starting", + pmu_sbi_starting_cpu, pmu_sbi_dying_cpu); + if (ret) { + pr_err("CPU hotplug notifier could not be registered: %d\n", + ret); + return ret; + } + + ret = platform_driver_register(&pmu_sbi_driver); + if (ret) + return ret; + + pdev = platform_device_register_simple(RISCV_PMU_PDEV_NAME, -1, NULL, 0); + if (IS_ERR(pdev)) { + platform_driver_unregister(&pmu_sbi_driver); + return PTR_ERR(pdev); + } + + /* Notify legacy implementation that SBI pmu is available*/ + riscv_pmu_legacy_skip_init(); + + return ret; +} +device_initcall(pmu_sbi_devinit) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 411a428ace4d..51c7e4929df7 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -165,6 +165,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, CPUHP_AP_PERF_ARM_ACPI_STARTING, CPUHP_AP_PERF_ARM_STARTING, + CPUHP_AP_PERF_RISCV_STARTING, CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index 9140c491fc54..0f226948c0ca 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -31,8 +31,10 @@ struct cpu_hw_events { int n_events; /* currently enabled events */ struct perf_event *events[RISCV_MAX_COUNTERS]; - /* currently enabled counters */ - DECLARE_BITMAP(used_event_ctrs, RISCV_MAX_COUNTERS); + /* currently enabled hardware counters */ + DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS); + /* currently enabled firmware counters */ + DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS); }; struct riscv_pmu { -- cgit v1.2.3-71-gd317 From 4905ec2fb7e6421c14c9fb7276f5aa92f60f2b98 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Fri, 18 Feb 2022 16:46:58 -0800 Subject: RISC-V: Add sscofpmf extension support The sscofpmf extension allows counter overflow and filtering for programmable counters. Enable the perf driver to handle the overflow interrupt. The overflow interrupt is a hart local interrupt. Thus, per cpu overflow interrupts are setup as a child under the root INTC irq domain. 
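Illustrative sketch (not part of the patch): how this overflow support surfaces
to userspace. With sscofpmf present, a sampling event like the one below can be
opened; without it, the driver advertises PERF_PMU_CAP_NO_INTERRUPT and the
perf core refuses any non-zero sample_period. The event attributes here are
example values only.

	/* Hedged sketch: open a sampling cycle counter on the current task.
	 * Sampling depends on the counter overflow interrupt added by this
	 * patch. */
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.sample_period = 100000;	/* interrupt every 100k cycles */
		attr.exclude_kernel = 1;	/* maps to SBI_PMU_CFG_FLAG_SET_SINH */

		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			perror("perf_event_open");
		else
			close(fd);
		return 0;
	}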
Signed-off-by: Atish Patra Signed-off-by: Atish Patra Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/csr.h | 8 +- arch/riscv/include/asm/hwcap.h | 1 + arch/riscv/kernel/cpu.c | 1 + arch/riscv/kernel/cpufeature.c | 2 + drivers/perf/riscv_pmu_sbi.c | 222 ++++++++++++++++++++++++++++++++++++++++- include/linux/perf/riscv_pmu.h | 2 + 6 files changed, 230 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index ce493df11177..8b2e48077731 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -65,6 +65,7 @@ #define IRQ_S_EXT 9 #define IRQ_VS_EXT 10 #define IRQ_M_EXT 11 +#define IRQ_PMU_OVF 13 /* Exception causes */ #define EXC_INST_MISALIGNED 0 @@ -212,6 +213,8 @@ #define CSR_HPMCOUNTER30H 0xc9e #define CSR_HPMCOUNTER31H 0xc9f +#define CSR_SSCOUNTOVF 0xda0 + #define CSR_SSTATUS 0x100 #define CSR_SIE 0x104 #define CSR_STVEC 0x105 @@ -298,7 +301,10 @@ # define RV_IRQ_SOFT IRQ_S_SOFT # define RV_IRQ_TIMER IRQ_S_TIMER # define RV_IRQ_EXT IRQ_S_EXT -#endif /* CONFIG_RISCV_M_MODE */ +# define RV_IRQ_PMU IRQ_PMU_OVF +# define SIP_LCOFIP (_AC(0x1, UL) << IRQ_PMU_OVF) + +#endif /* !CONFIG_RISCV_M_MODE */ /* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */ #define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT) diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 691fc9c8099b..0734e42f74f2 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -51,6 +51,7 @@ extern unsigned long elf_hwcap; * available logical extension id. */ enum riscv_isa_ext_id { + RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE, RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index fc115e307ef5..c11cb576394d 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -87,6 +87,7 @@ int riscv_of_parent_hartid(struct device_node *node) * extensions by an underscore. */ static struct riscv_isa_ext_data isa_ext_arr[] = { + __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX), }; diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index a43c08af5f4b..1b2d42d7f589 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -190,6 +190,8 @@ void __init riscv_fill_hwcap(void) if (!ext_long) { this_hwcap |= isa2hwcap[(unsigned char)(*ext)]; set_bit(*ext - 'a', this_isa); + } else { + SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF); } #undef SET_ISA_EXT_MAP } diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 815d5c509c64..a1317a483512 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -13,8 +13,13 @@ #include #include #include +#include +#include +#include +#include #include +#include union sbi_pmu_ctr_info { unsigned long value; @@ -35,6 +40,7 @@ union sbi_pmu_ctr_info { * per_cpu in case of harts with different pmu counters */ static union sbi_pmu_ctr_info *pmu_ctr_list; +static unsigned int riscv_pmu_irq; struct sbi_pmu_event_data { union { @@ -469,33 +475,229 @@ static int pmu_sbi_get_ctrinfo(int nctr) return 0; } +static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu) +{ + /** + * No need to check the error because we are disabling all the counters + * which may include counters that are not enabled yet. 
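+	 * Counters that were already stopped just make the SBI call
+	 * return SBI_ERR_ALREADY_STOPPED, which is harmless here.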
+ */ + sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, + 0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0); +} + +static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu) +{ + struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); + + /* No need to check the error here as we can't do anything about the error */ + sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0, + cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0); +} + +/** + * This function starts all the used counters in two step approach. + * Any counter that did not overflow can be start in a single step + * while the overflowed counters need to be started with updated initialization + * value. + */ +static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu, + unsigned long ctr_ovf_mask) +{ + int idx = 0; + struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); + struct perf_event *event; + unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE; + unsigned long ctr_start_mask = 0; + uint64_t max_period; + struct hw_perf_event *hwc; + u64 init_val = 0; + + ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask; + + /* Start all the counters that did not overflow in a single shot */ + sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask, + 0, 0, 0, 0); + + /* Reinitialize and start all the counter that overflowed */ + while (ctr_ovf_mask) { + if (ctr_ovf_mask & 0x01) { + event = cpu_hw_evt->events[idx]; + hwc = &event->hw; + max_period = riscv_pmu_ctr_get_width_mask(event); + init_val = local64_read(&hwc->prev_count) & max_period; + sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1, + flag, init_val, 0, 0); + } + ctr_ovf_mask = ctr_ovf_mask >> 1; + idx++; + } +} + +static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) +{ + struct perf_sample_data data; + struct pt_regs *regs; + struct hw_perf_event *hw_evt; + union sbi_pmu_ctr_info *info; + int lidx, hidx, fidx; + struct riscv_pmu *pmu; + struct perf_event *event; + unsigned long overflow; + unsigned long overflowed_ctrs = 0; + struct cpu_hw_events *cpu_hw_evt = dev; + + if (WARN_ON_ONCE(!cpu_hw_evt)) + return IRQ_NONE; + + /* Firmware counter don't support overflow yet */ + fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); + event = cpu_hw_evt->events[fidx]; + if (!event) { + csr_clear(CSR_SIP, SIP_LCOFIP); + return IRQ_NONE; + } + + pmu = to_riscv_pmu(event->pmu); + pmu_sbi_stop_hw_ctrs(pmu); + + /* Overflow status register should only be read after counter are stopped */ + overflow = csr_read(CSR_SSCOUNTOVF); + + /** + * Overflow interrupt pending bit should only be cleared after stopping + * all the counters to avoid any race condition. + */ + csr_clear(CSR_SIP, SIP_LCOFIP); + + /* No overflow bit is set */ + if (!overflow) + return IRQ_NONE; + + regs = get_irq_regs(); + + for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { + struct perf_event *event = cpu_hw_evt->events[lidx]; + + /* Skip if invalid event or user did not request a sampling */ + if (!event || !is_sampling_event(event)) + continue; + + info = &pmu_ctr_list[lidx]; + /* Do a sanity check */ + if (!info || info->type != SBI_PMU_CTR_TYPE_HW) + continue; + + /* compute hardware counter index */ + hidx = info->csr - CSR_CYCLE; + /* check if the corresponding bit is set in sscountovf */ + if (!(overflow & (1 << hidx))) + continue; + + /* + * Keep a track of overflowed counters so that they can be started + * with updated initial value. 
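+	 * (They are re-armed by pmu_sbi_start_overflow_mask() with
+	 * prev_count masked to the counter width.)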
+ */ + overflowed_ctrs |= 1 << lidx; + hw_evt = &event->hw; + riscv_pmu_event_update(event); + perf_sample_data_init(&data, 0, hw_evt->last_period); + if (riscv_pmu_event_set_period(event)) { + /* + * Unlike other ISAs, RISC-V don't have to disable interrupts + * to avoid throttling here. As per the specification, the + * interrupt remains disabled until the OF bit is set. + * Interrupts are enabled again only during the start. + * TODO: We will need to stop the guest counters once + * virtualization support is added. + */ + perf_event_overflow(event, &data, regs); + } + } + pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs); + + return IRQ_HANDLED; +} + static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) { struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node); + struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); /* Enable the access for TIME csr only from the user mode now */ csr_write(CSR_SCOUNTEREN, 0x2); /* Stop all the counters so that they can be enabled from perf */ - sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, - 0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0); + pmu_sbi_stop_all(pmu); + + if (riscv_isa_extension_available(NULL, SSCOFPMF)) { + cpu_hw_evt->irq = riscv_pmu_irq; + csr_clear(CSR_IP, BIT(RV_IRQ_PMU)); + csr_set(CSR_IE, BIT(RV_IRQ_PMU)); + enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE); + } return 0; } static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) { + if (riscv_isa_extension_available(NULL, SSCOFPMF)) { + disable_percpu_irq(riscv_pmu_irq); + csr_clear(CSR_IE, BIT(RV_IRQ_PMU)); + } + /* Disable all counters access for user mode now */ csr_write(CSR_SCOUNTEREN, 0x0); return 0; } +static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev) +{ + int ret; + struct cpu_hw_events __percpu *hw_events = pmu->hw_events; + struct device_node *cpu, *child; + struct irq_domain *domain = NULL; + + if (!riscv_isa_extension_available(NULL, SSCOFPMF)) + return -EOPNOTSUPP; + + for_each_of_cpu_node(cpu) { + child = of_get_compatible_child(cpu, "riscv,cpu-intc"); + if (!child) { + pr_err("Failed to find INTC node\n"); + return -ENODEV; + } + domain = irq_find_host(child); + of_node_put(child); + if (domain) + break; + } + if (!domain) { + pr_err("Failed to find INTC IRQ root domain\n"); + return -ENODEV; + } + + riscv_pmu_irq = irq_create_mapping(domain, RV_IRQ_PMU); + if (!riscv_pmu_irq) { + pr_err("Failed to map PMU interrupt for node\n"); + return -ENODEV; + } + + ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events); + if (ret) { + pr_err("registering percpu irq failed [%d]\n", ret); + return ret; + } + + return 0; +} + static int pmu_sbi_device_probe(struct platform_device *pdev) { struct riscv_pmu *pmu = NULL; int num_counters; - int ret; + int ret = -ENODEV; pr_info("SBI PMU extension is available\n"); pmu = riscv_pmu_alloc(); @@ -505,13 +707,19 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) num_counters = pmu_sbi_find_num_ctrs(); if (num_counters < 0) { pr_err("SBI PMU extension doesn't provide any counters\n"); - return -ENODEV; + goto out_free; } /* cache all the information about counters now */ if (pmu_sbi_get_ctrinfo(num_counters)) - return -ENODEV; + goto out_free; + ret = pmu_sbi_setup_irqs(pmu, pdev); + if (ret < 0) { + pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n"); + pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; + 
} pmu->num_counters = num_counters; pmu->ctr_start = pmu_sbi_ctr_start; pmu->ctr_stop = pmu_sbi_ctr_stop; @@ -532,6 +740,10 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) } return 0; + +out_free: + kfree(pmu); + return ret; } static struct platform_driver pmu_sbi_driver = { diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index 0f226948c0ca..46f9b6fe306e 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -29,6 +29,8 @@ struct cpu_hw_events { /* currently enabled events */ int n_events; + /* Counter overflow interrupt */ + int irq; /* currently enabled events */ struct perf_event *events[RISCV_MAX_COUNTERS]; /* currently enabled hardware counters */ -- cgit v1.2.3-71-gd317 From 863a66cdb4df25fd146d9851c3289072298566d5 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Wed, 16 Mar 2022 09:27:08 +0800 Subject: lib/sbitmap: allocate sb->map via kvzalloc_node sbitmap has been used in scsi for replacing atomic operations on sdev->device_busy, so IOPS on some fast scsi storage can be improved. However, sdev->device_busy can be changed in fast path, so we have to allocate the sb->map statically. sdev->device_busy has been capped to 1024, but some drivers may configure the default depth as < 8, then cause each sbitmap word to hold only one bit. Finally 1024 * 128( sizeof(sbitmap_word)) bytes is needed for sb->map, given it is order 5 allocation, sometimes it may fail. Avoid the issue by using kvzalloc_node() for allocating sb->map. Cc: Ewan D. Milne Signed-off-by: Ming Lei Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20220316012708.354668-1-ming.lei@redhat.com Signed-off-by: Jens Axboe --- include/linux/sbitmap.h | 2 +- lib/sbitmap.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index dffeb8281c2d..8f5a86e210b9 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index) static inline void sbitmap_free(struct sbitmap *sb) { free_percpu(sb->alloc_hint); - kfree(sb->map); + kvfree(sb->map); sb->map = NULL; } diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 2eb3de18ded3..ae4fd4de9ebe 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -110,7 +110,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, sb->alloc_hint = NULL; } - sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node); + sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node); if (!sb->map) { free_percpu(sb->alloc_hint); return -ENOMEM; -- cgit v1.2.3-71-gd317 From 4a0cb83ba6e0cd73a50fa4f84736846bf0029f2b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 21 Mar 2022 22:10:53 -0700 Subject: netdevice: add missing dm_private kdoc Building htmldocs complains: include/linux/netdevice.h:2295: warning: Function parameter or member 'dm_private' not described in 'net_device' Fixes: b26ef81c46ed ("drop_monitor: remove quadratic behavior") Signed-off-by: Jakub Kicinski Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20220322051053.1883186-1-kuba@kernel.org Signed-off-by: Paolo Abeni --- include/linux/netdevice.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e01a8ce7181f..cd7a597c55b1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1899,6 +1899,8 @@ enum netdev_ml_priv_type { * 
@garp_port: GARP * @mrp_port: MRP * + * @dm_private: Drop monitor private + * * @dev: Class/net/name entry * @sysfs_groups: Space for optional device, statistics and wireless * sysfs groups -- cgit v1.2.3-71-gd317 From 0313bc278dac7cd9ce83a8d384581dc043156965 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 22 Mar 2022 09:17:20 -0700 Subject: Revert "random: block in /dev/urandom" This reverts commit 6f98a4bfee72c22f50aedb39fb761567969865fe. It turns out we still can't do this. Way too many platforms that don't have any real source of randomness at boot and no jitter entropy because they don't even have a cycle counter. As reported by Guenter Roeck: "This causes a large number of qemu boot test failures for various architectures (arm, m68k, microblaze, sparc32, xtensa are the ones I observed). Common denominator is that boot hangs at 'Saving random seed:'" This isn't hugely unexpected - we tried it, it failed, so now we'll revert it. Link: https://lore.kernel.org/all/20220322155820.GA1745955@roeck-us.net/ Reported-and-bisected-by: Guenter Roeck Cc: Jason Donenfeld Signed-off-by: Linus Torvalds --- drivers/char/mem.c | 2 +- drivers/char/random.c | 72 ++++++++++++++++++++++++++++++++++++++------------ include/linux/random.h | 2 +- 3 files changed, 57 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 9f586025dbe6..cc296f0823bd 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -707,7 +707,7 @@ static const struct memdev { [5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT }, [7] = { "full", 0666, &full_fops, 0 }, [8] = { "random", 0666, &random_fops, 0 }, - [9] = { "urandom", 0666, &random_fops, 0 }, + [9] = { "urandom", 0666, &urandom_fops, 0 }, #ifdef CONFIG_PRINTK [11] = { "kmsg", 0644, &kmsg_fops, 0 }, #endif diff --git a/drivers/char/random.c b/drivers/char/random.c index 85f4cca6e707..66ce7c03a142 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -89,14 +89,17 @@ static RAW_NOTIFIER_HEAD(random_ready_chain); /* Control how we warn userspace. */ static struct ratelimit_state unseeded_warning = RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); +static struct ratelimit_state urandom_warning = + RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); static int ratelimit_disable __read_mostly; module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); /* * Returns whether or not the input pool has been seeded and thus guaranteed - * to supply cryptographically secure random numbers. This applies to - * get_random_bytes() and get_random_{u32,u64,int,long}(). + * to supply cryptographically secure random numbers. This applies to: the + * /dev/urandom device, the get_random_bytes function, and the get_random_{u32, + * ,u64,int,long} family of functions. * * Returns: true if the input pool has been seeded. * false if the input pool has not been seeded. @@ -112,10 +115,10 @@ static void try_to_generate_entropy(void); /* * Wait for the input pool to be seeded and thus guaranteed to supply - * cryptographically secure random numbers. This applies to - * get_random_bytes() and get_random_{u32,u64,int,long}(). Using any - * of these functions without first calling this function means that - * the returned numbers might not be cryptographically secure. + * cryptographically secure random numbers. 
This applies to: the /dev/urandom + * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} + * family of functions. Using any of these functions without first calling + * this function forfeits the guarantee of security. * * Returns: 0 if the input pool has been seeded. * -ERESTARTSYS if the function was interrupted by a signal. @@ -220,10 +223,10 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void * unsigned long get_random_long() * * These interfaces will return the requested number of random bytes - * into the given buffer or as a return value. The returned numbers are - * the same as those of getrandom(0). The integer family of functions may - * be higher performance for one-off random integers, because they do a - * bit of buffering and do not invoke reseeding. + * into the given buffer or as a return value. This is equivalent to + * a read from /dev/urandom. The integer family of functions may be + * higher performance for one-off random integers, because they do a + * bit of buffering. * *********************************************************************/ @@ -300,6 +303,11 @@ static void crng_reseed(bool force) unseeded_warning.missed); unseeded_warning.missed = 0; } + if (urandom_warning.missed) { + pr_notice("%d urandom warning(s) missed due to ratelimiting\n", + urandom_warning.missed); + urandom_warning.missed = 0; + } } } @@ -979,8 +987,10 @@ int __init rand_initialize(void) pr_notice("crng init done (trusting CPU's manufacturer)\n"); } - if (ratelimit_disable) + if (ratelimit_disable) { + urandom_warning.interval = 0; unseeded_warning.interval = 0; + } return 0; } @@ -1420,16 +1430,20 @@ static void try_to_generate_entropy(void) * getrandom(2) is the primary modern interface into the RNG and should * be used in preference to anything else. * - * Reading from /dev/random and /dev/urandom both have the same effect - * as calling getrandom(2) with flags=0. (In earlier versions, however, - * they each had different semantics.) + * Reading from /dev/random has the same functionality as calling + * getrandom(2) with flags=0. In earlier versions, however, it had + * vastly different semantics and should therefore be avoided, to + * prevent backwards compatibility issues. + * + * Reading from /dev/urandom has the same functionality as calling + * getrandom(2) with flags=GRND_INSECURE. Because it does not block + * waiting for the RNG to be ready, it should not be used. * * Writing to either /dev/random or /dev/urandom adds entropy to * the input pool but does not credit it. * - * Polling on /dev/random or /dev/urandom indicates when the RNG - * is initialized, on the read side, and when it wants new entropy, - * on the write side. + * Polling on /dev/random indicates when the RNG is initialized, on + * the read side, and when it wants new entropy, on the write side. 
* * Both /dev/random and /dev/urandom have the same set of ioctls for * adding entropy, getting the entropy count, zeroing the count, and @@ -1514,6 +1528,21 @@ static ssize_t random_write(struct file *file, const char __user *buffer, return (ssize_t)count; } +static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + static int maxwarn = 10; + + if (!crng_ready() && maxwarn > 0) { + maxwarn--; + if (__ratelimit(&urandom_warning)) + pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", + current->comm, nbytes); + } + + return get_random_bytes_user(buf, nbytes); +} + static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { @@ -1600,6 +1629,15 @@ const struct file_operations random_fops = { .llseek = noop_llseek, }; +const struct file_operations urandom_fops = { + .read = urandom_read, + .write = random_write, + .unlocked_ioctl = random_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .fasync = random_fasync, + .llseek = noop_llseek, +}; + /******************************************************************** * diff --git a/include/linux/random.h b/include/linux/random.h index c0baffe7afb1..f673fbb838b3 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -48,7 +48,7 @@ extern int unregister_random_ready_notifier(struct notifier_block *nb); extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes); #ifndef MODULE -extern const struct file_operations random_fops; +extern const struct file_operations random_fops, urandom_fops; #endif u32 get_random_u32(void); -- cgit v1.2.3-71-gd317 From 89f42494f92f448747bd8a7ab1ae8b5d5520577d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 16 Mar 2022 19:10:43 -0400 Subject: SUNRPC: Don't call connect() more than once on a TCP socket Avoid socket state races due to repeated calls to ->connect() using the same socket. If connect() returns 0 due to the connection having completed, but we are in fact in a closing state, then we may leave the XPRT_CONNECTING flag set on the transport. 
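A userspace analogue of the race being fixed (illustrative sketch only, not the
kernel code): once connect() has been issued on a socket, retrying on that same
socket can report success for a connection that is in fact closing, so the fix
tracks whether a SYN was ever sent and starts each retry from a fresh socket.

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <stdbool.h>
	#include <unistd.h>
	#include <errno.h>

	/* Mirrors the XPRT_SOCK_CONNECT_SENT idea: never reuse a socket on
	 * which connect() has already been attempted. */
	static int reconnect(int *fd, const struct sockaddr_in *addr,
			     bool *connect_sent)
	{
		if (*connect_sent && *fd >= 0) {
			close(*fd);
			*fd = -1;
			*connect_sent = false;
		}
		if (*fd < 0)
			*fd = socket(AF_INET, SOCK_STREAM, 0);
		if (*fd < 0)
			return -errno;

		if (connect(*fd, (const struct sockaddr *)addr,
			    sizeof(*addr)) == 0 || errno == EINPROGRESS) {
			*connect_sent = true;	/* SYN sent or completed */
			return 0;
		}
		return -errno;
	}

A caller would retry with backoff, much as xs_connect() schedules delayed
reconnect attempts.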
Reported-by: Enrico Scholz Fixes: 3be232f11a3c ("SUNRPC: Prevent immediate close+reconnect") Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprtsock.h | 1 + net/sunrpc/xprtsock.c | 22 ++++++++++++---------- 2 files changed, 13 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 8c2a712cb242..689062afdd61 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -89,5 +89,6 @@ struct sock_xprt { #define XPRT_SOCK_WAKE_WRITE (5) #define XPRT_SOCK_WAKE_PENDING (6) #define XPRT_SOCK_WAKE_DISCONNECT (7) +#define XPRT_SOCK_CONNECT_SENT (8) #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7e39f87cde2d..8f8a03c3315a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2235,10 +2235,15 @@ static void xs_tcp_setup_socket(struct work_struct *work) if (atomic_read(&xprt->swapper)) current->flags |= PF_MEMALLOC; - if (!sock) { - sock = xs_create_sock(xprt, transport, - xs_addr(xprt)->sa_family, SOCK_STREAM, - IPPROTO_TCP, true); + + if (xprt_connected(xprt)) + goto out; + if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT, + &transport->sock_state) || + !sock) { + xs_reset_transport(transport); + sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family, + SOCK_STREAM, IPPROTO_TCP, true); if (IS_ERR(sock)) { xprt_wake_pending_tasks(xprt, PTR_ERR(sock)); goto out; @@ -2262,6 +2267,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) fallthrough; case -EINPROGRESS: /* SYN_SENT! */ + set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state); if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; fallthrough; @@ -2323,13 +2329,9 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); - if (transport->sock != NULL && !xprt_connecting(xprt)) { + if (transport->sock != NULL) { dprintk("RPC: xs_connect delayed xprt %p for %lu " - "seconds\n", - xprt, xprt->reestablish_timeout / HZ); - - /* Start by resetting any existing state */ - xs_reset_transport(transport); + "seconds\n", xprt, xprt->reestablish_timeout / HZ); delay = xprt_reconnect_delay(xprt); xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO); -- cgit v1.2.3-71-gd317 From 2790a624d43084de590884934969e19c7a82316a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 15 Mar 2022 08:12:40 -0400 Subject: SUNRPC: Replace internal use of SOCKWQ_ASYNC_NOSPACE The socket's SOCKWQ_ASYNC_NOSPACE can be cleared by various actors in the socket layer, so replace it with our own flag in the transport sock_state field. 
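The essence of the change, modeled as a standalone sketch (bit name borrowed
from the patch; this is not the kernel code itself): the sender marks a private
NOSPACE bit when the socket fills, and the write-space callback wakes a waiter
only if it can atomically clear that same bit, so no other socket-layer user
can consume the wakeup.

	#include <stdatomic.h>
	#include <stdbool.h>

	#define XPRT_SOCK_NOSPACE 9	/* bit index, as in the patch */

	static atomic_ulong sock_state;

	static void sender_hits_nospace(void)
	{
		/* Analogue of set_bit(XPRT_SOCK_NOSPACE, ...) before
		 * sleeping in xprt_wait_for_buffer_space(). */
		atomic_fetch_or(&sock_state, 1UL << XPRT_SOCK_NOSPACE);
	}

	static bool write_space_wakes_waiter(void)
	{
		/* Analogue of test_and_clear_bit() in xs_write_space():
		 * wake only if the bit was set, and clear it so a single
		 * wakeup is delivered per stall. */
		unsigned long old = atomic_fetch_and(&sock_state,
					~(1UL << XPRT_SOCK_NOSPACE));
		return old & (1UL << XPRT_SOCK_NOSPACE);
	}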
Reported-by: Chuck Lever Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprtsock.h | 1 + net/sunrpc/xprtsock.c | 22 ++++------------------ 2 files changed, 5 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 689062afdd61..3eb0079669c5 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -90,5 +90,6 @@ struct sock_xprt { #define XPRT_SOCK_WAKE_PENDING (6) #define XPRT_SOCK_WAKE_DISCONNECT (7) #define XPRT_SOCK_CONNECT_SENT (8) +#define XPRT_SOCK_NOSPACE (9) #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 68eee352d69a..2450b31b807a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -780,14 +780,8 @@ static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport) /* Don't race with disconnect */ if (xprt_connected(xprt)) { - struct socket_wq *wq; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); - rcu_read_unlock(); - /* wait for more buffer space */ + set_bit(XPRT_SOCK_NOSPACE, &transport->sock_state); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; xprt_wait_for_buffer_space(xprt); @@ -1151,6 +1145,7 @@ static void xs_sock_reset_state_flags(struct rpc_xprt *xprt) clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state); clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state); clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state); + clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state); } static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr) @@ -1497,7 +1492,6 @@ static void xs_tcp_state_change(struct sock *sk) static void xs_write_space(struct sock *sk) { - struct socket_wq *wq; struct sock_xprt *transport; struct rpc_xprt *xprt; @@ -1508,15 +1502,10 @@ static void xs_write_space(struct sock *sk) if (unlikely(!(xprt = xprt_from_sock(sk)))) return; transport = container_of(xprt, struct sock_xprt, xprt); - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0) - goto out; - + if (!test_and_clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state)) + return; xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE); sk->sk_write_pending--; -out: - rcu_read_unlock(); } /** @@ -1857,7 +1846,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, sk->sk_user_data = xprt; sk->sk_data_ready = xs_data_ready; sk->sk_write_space = xs_udp_write_space; - sock_set_flag(sk, SOCK_FASYNC); sk->sk_error_report = xs_error_report; xprt_clear_connected(xprt); @@ -2051,7 +2039,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_user_data = xprt; sk->sk_data_ready = xs_data_ready; sk->sk_write_space = xs_udp_write_space; - sock_set_flag(sk, SOCK_FASYNC); xprt_set_connected(xprt); @@ -2218,7 +2205,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_data_ready = xs_data_ready; sk->sk_state_change = xs_tcp_state_change; sk->sk_write_space = xs_tcp_write_space; - sock_set_flag(sk, SOCK_FASYNC); sk->sk_error_report = xs_error_report; /* socket options */ -- cgit v1.2.3-71-gd317 From 33e5c765bc1ea5e06ea7603637f14d727e6fcdf3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 14 Mar 2022 22:02:22 -0400 Subject: NFS: Fix memory allocation in rpc_malloc() When in a low memory situation, we do want rpciod to kick off direct reclaim in the case where that 
helps, however we don't want it looping forever in mempool_alloc(). So first try allocating from the slab using GFP_KERNEL | __GFP_NORETRY, and then fall back to a GFP_NOWAIT allocation from the mempool. Ditto for rpc_alloc_task() Signed-off-by: Trond Myklebust --- include/linux/sunrpc/sched.h | 1 + net/sunrpc/sched.c | 21 ++++++++++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 56710f8056d3..1d7a3e51b795 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -262,6 +262,7 @@ void rpc_destroy_mempool(void); extern struct workqueue_struct *rpciod_workqueue; extern struct workqueue_struct *xprtiod_workqueue; void rpc_prepare_task(struct rpc_task *task); +gfp_t rpc_task_gfp_mask(void); static inline int rpc_wait_for_completion_task(struct rpc_task *task) { diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 7c8f87ebdbc0..d59a033820be 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -57,6 +57,13 @@ struct workqueue_struct *rpciod_workqueue __read_mostly; struct workqueue_struct *xprtiod_workqueue __read_mostly; EXPORT_SYMBOL_GPL(xprtiod_workqueue); +gfp_t rpc_task_gfp_mask(void) +{ + if (current->flags & PF_WQ_WORKER) + return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; + return GFP_KERNEL; +} + unsigned long rpc_task_timeout(const struct rpc_task *task) { @@ -1030,15 +1037,15 @@ int rpc_malloc(struct rpc_task *task) struct rpc_rqst *rqst = task->tk_rqstp; size_t size = rqst->rq_callsize + rqst->rq_rcvsize; struct rpc_buffer *buf; - gfp_t gfp = GFP_KERNEL; - - if (RPC_IS_ASYNC(task)) - gfp = GFP_NOWAIT | __GFP_NOWARN; + gfp_t gfp = rpc_task_gfp_mask(); size += sizeof(struct rpc_buffer); - if (size <= RPC_BUFFER_MAXSIZE) - buf = mempool_alloc(rpc_buffer_mempool, gfp); - else + if (size <= RPC_BUFFER_MAXSIZE) { + buf = kmem_cache_alloc(rpc_buffer_slabp, gfp); + /* Reach for the mempool if dynamic allocation fails */ + if (!buf && RPC_IS_ASYNC(task)) + buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT); + } else buf = kmalloc(size, gfp); if (!buf) -- cgit v1.2.3-71-gd317 From 515dcdcd48736576c6f5c197814da6f81c60a21e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 21 Mar 2022 12:34:19 -0400 Subject: NFS: nfsiod should not block forever in mempool_alloc() The concern is that since nfsiod is sometimes required to kick off a commit, it can get locked up waiting forever in mempool_alloc() instead of failing gracefully and leaving the commit until later. Try to allocate from the slab first, with GFP_KERNEL | __GFP_NORETRY, then fall back to a non-blocking attempt to allocate from the memory pool. 
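The resulting allocation pattern, distilled (a sketch of the generic shape,
not a verbatim copy of either call site):

	#include <linux/slab.h>
	#include <linux/mempool.h>

	/* Try the slab with reclaim allowed but without retrying forever;
	 * only if that fails, take a non-blocking dip into the emergency
	 * mempool. The caller must still handle a NULL return. */
	static void *two_step_alloc(struct kmem_cache *cachep, mempool_t *pool)
	{
		void *p = kmem_cache_alloc(cachep,
				GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

		if (!p)
			p = mempool_alloc(pool, GFP_NOWAIT);
		return p;
	}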
Signed-off-by: Trond Myklebust --- fs/nfs/internal.h | 7 +++++++ fs/nfs/pnfs_nfs.c | 8 ++++++-- fs/nfs/write.c | 24 +++++++++--------------- include/linux/nfs_fs.h | 2 +- 4 files changed, 23 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 194840a97e3a..57b0497105c8 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -587,6 +587,13 @@ nfs_write_match_verf(const struct nfs_writeverf *verf, !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier); } +static inline gfp_t nfs_io_gfp_mask(void) +{ + if (current->flags & PF_WQ_WORKER) + return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; + return GFP_KERNEL; +} + /* unlink.c */ extern struct rpc_task * nfs_async_rename(struct inode *old_dir, struct inode *new_dir, diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 316f68f96e57..657c242a18ff 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -419,7 +419,7 @@ static struct nfs_commit_data * pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket, struct nfs_commit_info *cinfo) { - struct nfs_commit_data *data = nfs_commitdata_alloc(false); + struct nfs_commit_data *data = nfs_commitdata_alloc(); if (!data) return NULL; @@ -515,7 +515,11 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, unsigned int nreq = 0; if (!list_empty(mds_pages)) { - data = nfs_commitdata_alloc(true); + data = nfs_commitdata_alloc(); + if (!data) { + nfs_retry_commit(mds_pages, NULL, cinfo, -1); + return -ENOMEM; + } data->ds_commit_index = -1; list_splice_init(mds_pages, &data->pages); list_add_tail(&data->list, &list); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 599a82406d38..ef47e3700e4b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -70,27 +70,17 @@ static mempool_t *nfs_wdata_mempool; static struct kmem_cache *nfs_cdata_cachep; static mempool_t *nfs_commit_mempool; -struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail) +struct nfs_commit_data *nfs_commitdata_alloc(void) { struct nfs_commit_data *p; - if (never_fail) - p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); - else { - /* It is OK to do some reclaim, not no safe to wait - * for anything to be returned to the pool. - * mempool_alloc() cannot handle that particular combination, - * so we need two separate attempts. 
- */ + p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask()); + if (!p) { p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT); - if (!p) - p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO | - __GFP_NOWARN | __GFP_NORETRY); if (!p) return NULL; + memset(p, 0, sizeof(*p)); } - - memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->pages); return p; } @@ -1826,7 +1816,11 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, if (list_empty(head)) return 0; - data = nfs_commitdata_alloc(true); + data = nfs_commitdata_alloc(); + if (!data) { + nfs_retry_commit(head, NULL, cinfo, -1); + return -ENOMEM; + } /* Set up the argument struct */ nfs_init_commit(data, head, NULL, cinfo); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c47c448befc8..db305abafc9e 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -580,7 +580,7 @@ extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page *page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); -extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); +extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); -- cgit v1.2.3-71-gd317 From 62eb29526b48d20704668a2fdf97a49d01bf52ce Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Tue, 22 Mar 2022 14:38:33 -0700 Subject: linux/kthread.h: remove unused macros Ever since these macros were introduced in commit b56c0d8937e6 ("kthread: implement kthread_worker"), there has been precisely one user (commit 4d115420707a, "NVMe: Async IO queue deletion"), and that user went away in 2016 with db3cbfff5bcc ("NVMe: IO queue deletion re-write"). Apart from being unused, these macros are also awkward to use (which may contribute to them not being used): Having a way to statically (or on-stack) allocating the storage for the struct kthread_worker itself doesn't help much, since obviously one needs to have some code for actually _spawning_ the worker thread, which must have error checking. And these days we have the kthread_create_worker() interface which both allocates the struct kthread_worker and spawns the kthread. Link: https://lkml.kernel.org/r/20220314145343.494694-1-linux@rasmusvillemoes.dk Signed-off-by: Rasmus Villemoes Acked-by: Tejun Heo Cc: "Eric W. 
Biederman" Cc: Petr Mladek Cc: David Hildenbrand Cc: Yafang Shao Cc: Cai Huoqing Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kthread.h | 22 ---------------------- 1 file changed, 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 3df4ea04716f..de5d75bafd66 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -141,12 +141,6 @@ struct kthread_delayed_work { struct timer_list timer; }; -#define KTHREAD_WORKER_INIT(worker) { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \ - .work_list = LIST_HEAD_INIT((worker).work_list), \ - .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ - } - #define KTHREAD_WORK_INIT(work, fn) { \ .node = LIST_HEAD_INIT((work).node), \ .func = (fn), \ @@ -158,9 +152,6 @@ struct kthread_delayed_work { TIMER_IRQSAFE), \ } -#define DEFINE_KTHREAD_WORKER(worker) \ - struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) - #define DEFINE_KTHREAD_WORK(work, fn) \ struct kthread_work work = KTHREAD_WORK_INIT(work, fn) @@ -168,19 +159,6 @@ struct kthread_delayed_work { struct kthread_delayed_work dwork = \ KTHREAD_DELAYED_WORK_INIT(dwork, fn) -/* - * kthread_worker.lock needs its own lockdep class key when defined on - * stack with lockdep enabled. Use the following macros in such cases. - */ -#ifdef CONFIG_LOCKDEP -# define KTHREAD_WORKER_INIT_ONSTACK(worker) \ - ({ kthread_init_worker(&worker); worker; }) -# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ - struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) -#else -# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) -#endif - extern void __kthread_init_worker(struct kthread_worker *worker, const char *name, struct lock_class_key *key); -- cgit v1.2.3-71-gd317 From bf507030f312c68fdbb17c2d33f317cda109a484 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 22 Mar 2022 14:38:48 -0700 Subject: doc: convert 'subsection' to 'section' in gfp.h Patch series "Remove remaining parts of congestion tracking code", v2. This patch (of 11): Various DOC: sections in gfp.h have subsection headers (~~~) but the place where they are included in mm-api.rst does not have section, only chapters. So convert to section headers (---) to avoid confusion. Specifically if sections are added later in mm-api.rst, an error results. Link: https://lkml.kernel.org/r/164549971112.9187.16871723439770288255.stgit@noble.brown Link: https://lkml.kernel.org/r/164549983733.9187.17894407453436115822.stgit@noble.brown Signed-off-by: NeilBrown Cc: Jan Kara Cc: Wu Fengguang Cc: Jaegeuk Kim Cc: Chao Yu Cc: Jeff Layton Cc: Ilya Dryomov Cc: Miklos Szeredi Cc: Trond Myklebust Cc: Anna Schumaker Cc: Ryusuke Konishi Cc: Darrick J. Wong Cc: Philipp Reisner Cc: Lars Ellenberg Cc: Paolo Valente Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 80f63c862be5..20f6fbe12993 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -79,7 +79,7 @@ struct vm_area_struct; * DOC: Page mobility and placement hints * * Page mobility and placement hints - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * --------------------------------- * * These flags provide hints about how mobile the page is. 
Pages with similar * mobility are placed within the same pageblocks to minimise problems due @@ -112,7 +112,7 @@ struct vm_area_struct; * DOC: Watermark modifiers * * Watermark modifiers -- controls access to emergency reserves - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * ------------------------------------------------------------ * * %__GFP_HIGH indicates that the caller is high-priority and that granting * the request is necessary before the system can make forward progress. @@ -144,7 +144,7 @@ struct vm_area_struct; * DOC: Reclaim modifiers * * Reclaim modifiers - * ~~~~~~~~~~~~~~~~~ + * ----------------- * Please note that all the following flags are only applicable to sleepable * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them). * @@ -224,7 +224,7 @@ struct vm_area_struct; * DOC: Action modifiers * * Action modifiers - * ~~~~~~~~~~~~~~~~ + * ---------------- * * %__GFP_NOWARN suppresses allocation failure reports. * @@ -256,7 +256,7 @@ struct vm_area_struct; * DOC: Useful GFP flag combinations * * Useful GFP flag combinations - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * ---------------------------- * * Useful GFP flag combinations that are commonly used. It is recommended * that subsystems start with one of these combinations and then set/clear -- cgit v1.2.3-71-gd317 From 84dacdbd5352bfef82423760fa2e8bffaeef9e05 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 22 Mar 2022 14:38:51 -0700 Subject: mm: document and polish read-ahead code Add some "big-picture" documentation for read-ahead and polish the code to make it fit this documentation. The meaning of ->async_size is clarified to match its name. i.e. Any request to ->readahead() has a sync part and an async part. The caller will wait for the sync pages to complete, but will not wait for the async pages. The first async page is still marked PG_readahead Note that the current function names page_cache_sync_ra() and page_cache_async_ra() are misleading. All ra request are partly sync and partly async, so either part can be empty. A page_cache_sync_ra() request will usually set ->async_size non-zero, implying it is not all synchronous. When a non-zero req_count is passed to page_cache_async_ra(), the implication is that some prefix of the request is synchronous, though the calculation made there is incorrect - I haven't tried to fix it. Link: https://lkml.kernel.org/r/164549983734.9187.11586890887006601405.stgit@noble.brown Signed-off-by: NeilBrown Cc: Anna Schumaker Cc: Chao Yu Cc: Darrick J. Wong Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jan Kara Cc: Jeff Layton Cc: Jens Axboe Cc: Lars Ellenberg Cc: Miklos Szeredi Cc: Paolo Valente Cc: Philipp Reisner Cc: Ryusuke Konishi Cc: Trond Myklebust Cc: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/core-api/mm-api.rst | 19 +++++++- Documentation/filesystems/vfs.rst | 16 ++++--- include/linux/fs.h | 9 +++- mm/readahead.c | 99 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 133 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst index 395835f9289f..f5b2f92822c8 100644 --- a/Documentation/core-api/mm-api.rst +++ b/Documentation/core-api/mm-api.rst @@ -58,15 +58,30 @@ Virtually Contiguous Mappings File Mapping and Page Cache =========================== -.. kernel-doc:: mm/readahead.c - :export: +Filemap +------- .. kernel-doc:: mm/filemap.c :export: +Readahead +--------- + +.. 
kernel-doc:: mm/readahead.c + :doc: Readahead Overview + +.. kernel-doc:: mm/readahead.c + :export: + +Writeback +--------- + .. kernel-doc:: mm/page-writeback.c :export: +Truncate +-------- + .. kernel-doc:: mm/truncate.c :export: diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index bf5c48066fac..b4a0baa46dcc 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -806,12 +806,16 @@ cache in your filesystem. The following members are defined: object. The pages are consecutive in the page cache and are locked. The implementation should decrement the page refcount after starting I/O on each page. Usually the page will be - unlocked by the I/O completion handler. If the filesystem decides - to stop attempting I/O before reaching the end of the readahead - window, it can simply return. The caller will decrement the page - refcount and unlock the remaining pages for you. Set PageUptodate - if the I/O completes successfully. Setting PageError on any page - will be ignored; simply unlock the page if an I/O error occurs. + unlocked by the I/O completion handler. The set of pages is + divided into some sync pages followed by some async pages, + rac->ra->async_size gives the number of async pages. The + filesystem should attempt to read all sync pages but may decide + to stop once it reaches the async pages. If it does decide to + stop attempting I/O, it can simply return. The caller will + remove the remaining pages from the address space, unlock them + and decrement the page refcount. Set PageUptodate if the I/O + completes successfully. Setting PageError on any page will be + ignored; simply unlock the page if an I/O error occurs. ``readpages`` called by the VM to read pages associated with the address_space diff --git a/include/linux/fs.h b/include/linux/fs.h index e2d892b201b0..8b5c486bd4a2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -930,10 +930,15 @@ struct fown_struct { * struct file_ra_state - Track a file's readahead state. * @start: Where the most recent readahead started. * @size: Number of pages read in the most recent readahead. - * @async_size: Start next readahead when this many pages are left. - * @ra_pages: Maximum size of a readahead request. + * @async_size: Number of pages that were/are not needed immediately + * and so were/are genuinely "ahead". Start next readahead when + * the first of these pages is accessed. + * @ra_pages: Maximum size of a readahead request, copied from the bdi. * @mmap_miss: How many mmap accesses missed in the page cache. * @prev_pos: The last byte in the most recent read request. + * + * When this structure is passed to ->readahead(), the "most recent" + * readahead means the current readahead. */ struct file_ra_state { pgoff_t start; diff --git a/mm/readahead.c b/mm/readahead.c index cf0dcf89eb69..73b2bc5302e0 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -8,6 +8,105 @@ * Initial version. */ +/** + * DOC: Readahead Overview + * + * Readahead is used to read content into the page cache before it is + * explicitly requested by the application. Readahead only ever + * attempts to read pages that are not yet in the page cache. If a + * page is present but not up-to-date, readahead will not try to read + * it. In that case a simple ->readpage() will be requested.
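To make the ->readahead() contract described in the vfs.rst hunk above concrete, here is a minimal sketch of an implementation loop; example_submit_read() is a hypothetical I/O-submission helper, not a real kernel API:

    static void example_readahead(struct readahead_control *rac)
    {
    	struct page *page;

    	while ((page = readahead_page(rac))) {
    		/*
    		 * example_submit_read() is hypothetical: it starts read
    		 * I/O and arranges for the completion handler to unlock
    		 * the page (and set PageUptodate on success).
    		 */
    		if (example_submit_read(page))
    			unlock_page(page);	/* signal a failed attempt */
    		/* Drop the reference readahead_page() handed us. */
    		put_page(page);
    	}
    }

A filesystem that wants to stop early can simply break out of the loop; the caller cleans up any pages that were never taken with readahead_page().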
+ * + * Readahead is triggered when an application read request (whether a + * system call or a page fault) finds that the requested page is not in + * the page cache, or that it is in the page cache and has the + * %PG_readahead flag set. This flag indicates that the page was loaded + * as part of a previous read-ahead request and now that it has been + * accessed, it is time for the next read-ahead. + * + * Each readahead request is partly a synchronous read and partly async + * read-ahead. This is reflected in the struct file_ra_state which + * contains ->size being the total number of pages, and ->async_size + * which is the number of pages in the async section. The first page in + * this async section will have %PG_readahead set as a trigger for a + * subsequent read ahead. Once a series of sequential reads has been + * established, there should be no need for a synchronous component and + * all read-ahead requests will be fully asynchronous. + * + * When either of the triggers causes a readahead, three numbers need to + * be determined: the start of the region, the size of the region, and + * the size of the async tail. + * + * The start of the region is simply the first page address at or after + * the accessed address, which is not currently populated in the page + * cache. This is found with a simple search in the page cache. + * + * The size of the async tail is determined by subtracting the size that + * was explicitly requested from the determined request size, unless + * this would be less than zero - then zero is used. NOTE THIS + * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED + * PAGE. + * + * The size of the region is normally determined from the size of the + * previous readahead which loaded the preceding pages. This may be + * discovered from the struct file_ra_state for simple sequential reads, + * or from examining the state of the page cache when multiple + * sequential reads are interleaved. Specifically: where the readahead + * was triggered by the %PG_readahead flag, the size of the previous + * readahead is assumed to be the number of pages from the triggering + * page to the start of the new readahead. In these cases, the size of + * the previous readahead is scaled, often doubled, for the new + * readahead, though see get_next_ra_size() for details. + * + * If the size of the previous read cannot be determined, the number of + * preceding pages in the page cache is used to estimate the size of + * a previous read. This estimate could easily be misled by random + * reads being coincidentally adjacent, so it is ignored unless it is + * larger than the current request, and it is not scaled up, unless it + * is at the start of the file. + * + * In general read ahead is accelerated at the start of the file, as + * reads from there are often sequential. There are other minor + * adjustments to the read ahead size in various special cases and these + * are best discovered by reading the code. + * + * The above calculation determines the readahead, to which any requested + * read size may be added. + * + * Readahead requests are sent to the filesystem using the ->readahead() + * address space operation, for which mpage_readahead() is a canonical + * implementation. ->readahead() should normally initiate reads on all + * pages, but may fail to read any or all pages without causing an IO + * error.
The page cache reading code will issue a ->readpage() request + * for any page which ->readahead() does not provide, and only an error + * from this will be final. + * + * ->readahead() will generally call readahead_page() repeatedly to get + * each page from those prepared for read ahead. It may fail to read a + * page by: + * + * * not calling readahead_page() sufficiently many times, effectively + * ignoring some pages, as might be appropriate if the path to + * storage is congested. + * + * * failing to actually submit a read request for a given page, + * possibly due to insufficient resources, or + * + * * getting an error during subsequent processing of a request. + * + * In the last two cases, the page should be unlocked to indicate that + * the read attempt has failed. In the first case the page will be + * unlocked by the caller. + * + * Those pages not in the final ``async_size`` of the request should be + * considered to be important and ->readahead() should not fail them due + * to congestion or temporary resource unavailability, but should wait + * for necessary resources (e.g. memory or indexing information) to + * become available. Pages in the final ``async_size`` may be + * considered less urgent and failure to read them is more acceptable. + * They will eventually be read individually using ->readpage(). + */ + #include #include #include -- cgit v1.2.3-71-gd317 From 6df25e58532be7a4cd6fb15bcd85805947402d91 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 22 Mar 2022 14:39:01 -0700 Subject: nfs: remove reliance on bdi congestion The bdi congestion tracking is not widely used and will be removed. NFS is one of a small number of filesystems that uses it, setting just the async (write) congestion flag at what it determines are appropriate times. The only remaining effect of the async flag is to cause (some) WB_SYNC_NONE writes to be skipped. So instead of setting the flag, set an internal flag and change: - .writepages to do nothing if WB_SYNC_NONE and the flag is set - .writepage to return AOP_WRITEPAGE_ACTIVATE if WB_SYNC_NONE and the flag is set. The writepages change causes a behavioural change in that pageout() can now return PAGE_ACTIVATE instead of PAGE_KEEP, so SetPageActive() will be called on the page which (I think) will further delay the next attempt at writeout. This might be a good thing. Link: https://lkml.kernel.org/r/164549983738.9187.3972219847989393182.stgit@noble.brown Signed-off-by: NeilBrown Cc: Anna Schumaker Cc: Chao Yu Cc: Darrick J.
Wong Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jan Kara Cc: Jeff Layton Cc: Jens Axboe Cc: Lars Ellenberg Cc: Miklos Szeredi Cc: Paolo Valente Cc: Philipp Reisner Cc: Ryusuke Konishi Cc: Trond Myklebust Cc: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nfs/write.c | 14 +++++++++++--- include/linux/nfs_fs_sb.h | 1 + 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 987a187bd39a..7c986164018e 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -417,7 +417,7 @@ static void nfs_set_page_writeback(struct page *page) if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH) - set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); + nfss->write_congested = 1; } static void nfs_end_page_writeback(struct nfs_page *req) @@ -433,7 +433,7 @@ static void nfs_end_page_writeback(struct nfs_page *req) end_page_writeback(req->wb_page); if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) - clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); + nfss->write_congested = 0; } /* @@ -672,6 +672,10 @@ static int nfs_writepage_locked(struct page *page, struct inode *inode = page_file_mapping(page)->host; int err; + if (wbc->sync_mode == WB_SYNC_NONE && + NFS_SERVER(inode)->write_congested) + return AOP_WRITEPAGE_ACTIVATE; + nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); nfs_pageio_init_write(&pgio, inode, 0, false, &nfs_async_write_completion_ops); @@ -719,6 +723,10 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) int priority = 0; int err; + if (wbc->sync_mode == WB_SYNC_NONE && + NFS_SERVER(inode)->write_congested) + return 0; + nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate || @@ -1893,7 +1901,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) } nfss = NFS_SERVER(data->inode); if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) - clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC); + nfss->write_congested = 0; nfs_init_cinfo(&cinfo, data->inode, data->dreq); nfs_commit_end(cinfo.mds); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index ca0959e51e81..6aa2a200676a 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -138,6 +138,7 @@ struct nfs_server { struct nlm_host *nlm_host; /* NLM client handle */ struct nfs_iostats __percpu *io_stats; /* I/O statistics */ atomic_long_t writeback; /* number of writeback pages */ + unsigned int write_congested;/* flag set when writeback gets too high */ unsigned int flags; /* various flags */ /* The following are for internal use only. Also see uapi/linux/nfs_mount.h */ -- cgit v1.2.3-71-gd317 From fe55d563d4174f13839a9b7ef7309da5031b5d93 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 22 Mar 2022 14:39:07 -0700 Subject: remove inode_congested() inode_congested() reports if the backing-device for the inode is congested. No bdi reports congestion any more, so this always returns 'false'. So remove inode_congested() and related functions, and remove the call sites, assuming that inode_congested() always returns 'false'. Link: https://lkml.kernel.org/r/164549983741.9187.2174285592262191311.stgit@noble.brown Signed-off-by: NeilBrown Cc: Anna Schumaker Cc: Chao Yu Cc: Darrick J. 
Wong Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jan Kara Cc: Jeff Layton Cc: Jens Axboe Cc: Lars Ellenberg Cc: Miklos Szeredi Cc: Paolo Valente Cc: Philipp Reisner Cc: Ryusuke Konishi Cc: Trond Myklebust Cc: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 37 ------------------------------------- include/linux/backing-dev.h | 22 ---------------------- mm/fadvise.c | 5 ++--- mm/readahead.c | 6 ------ mm/vmscan.c | 17 +---------------- 5 files changed, 3 insertions(+), 84 deletions(-) (limited to 'include/linux') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f8d7fe6db989..42a3dfad40b8 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -893,43 +893,6 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page, } EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner); -/** - * inode_congested - test whether an inode is congested - * @inode: inode to test for congestion (may be NULL) - * @cong_bits: mask of WB_[a]sync_congested bits to test - * - * Tests whether @inode is congested. @cong_bits is the mask of congestion - * bits to test and the return value is the mask of set bits. - * - * If cgroup writeback is enabled for @inode, the congestion state is - * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg - * associated with @inode is congested; otherwise, the root wb's congestion - * state is used. - * - * @inode is allowed to be NULL as this function is often called on - * mapping->host which is NULL for the swapper space. - */ -int inode_congested(struct inode *inode, int cong_bits) -{ - /* - * Once set, ->i_wb never becomes NULL while the inode is alive. - * Start transaction iff ->i_wb is visible. - */ - if (inode && inode_to_wb_is_valid(inode)) { - struct bdi_writeback *wb; - struct wb_lock_cookie lock_cookie = {}; - bool congested; - - wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); - congested = wb_congested(wb, cong_bits); - unlocked_inode_to_wb_end(inode, &lock_cookie); - return congested; - } - - return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); -} -EXPORT_SYMBOL_GPL(inode_congested); - /** * wb_split_bdi_pages - split nr_pages to write according to bandwidth * @wb: target bdi_writeback to split @nr_pages to diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 483979c1b9f4..860b675c2929 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -162,7 +162,6 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, gfp_t gfp); void wb_memcg_offline(struct mem_cgroup *memcg); void wb_blkcg_offline(struct blkcg *blkcg); -int inode_congested(struct inode *inode, int cong_bits); /** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode @@ -390,29 +389,8 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg) { } -static inline int inode_congested(struct inode *inode, int cong_bits) -{ - return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); -} - #endif /* CONFIG_CGROUP_WRITEBACK */ -static inline int inode_read_congested(struct inode *inode) -{ - return inode_congested(inode, 1 << WB_sync_congested); -} - -static inline int inode_write_congested(struct inode *inode) -{ - return inode_congested(inode, 1 << WB_async_congested); -} - -static inline int inode_rw_congested(struct inode *inode) -{ - return inode_congested(inode, (1 << WB_sync_congested) | - (1 << WB_async_congested)); -} - static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits) { return wb_congested(&bdi->wb, cong_bits); 
diff --git a/mm/fadvise.c b/mm/fadvise.c index d6baa4f451c5..338f16022012 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -109,9 +109,8 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice) case POSIX_FADV_NOREUSE: break; case POSIX_FADV_DONTNEED: - if (!inode_write_congested(mapping->host)) - __filemap_fdatawrite_range(mapping, offset, endbyte, - WB_SYNC_NONE); + __filemap_fdatawrite_range(mapping, offset, endbyte, + WB_SYNC_NONE); /* * First and last FULL page! Partial pages are deliberately diff --git a/mm/readahead.c b/mm/readahead.c index 8a97bd408cf6..f61943fd1741 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -709,12 +709,6 @@ void page_cache_async_ra(struct readahead_control *ractl, folio_clear_readahead(folio); - /* - * Defer asynchronous read-ahead on IO congestion. - */ - if (inode_read_congested(ractl->mapping->host)) - return; - if (blk_cgroup_congested()) return; diff --git a/mm/vmscan.c b/mm/vmscan.c index 59b14e0d696c..e38de6456cdc 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -989,17 +989,6 @@ static inline int is_page_cache_freeable(struct page *page) return page_count(page) - page_has_private(page) == 1 + page_cache_pins; } -static int may_write_to_inode(struct inode *inode) -{ - if (current->flags & PF_SWAPWRITE) - return 1; - if (!inode_write_congested(inode)) - return 1; - if (inode_to_bdi(inode) == current->backing_dev_info) - return 1; - return 0; -} - /* * We detected a synchronous write error writing a page out. Probably * -ENOSPC. We need to propagate that into the address_space for a subsequent @@ -1201,8 +1190,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping) } if (mapping->a_ops->writepage == NULL) return PAGE_ACTIVATE; - if (!may_write_to_inode(mapping->host)) - return PAGE_KEEP; if (clear_page_dirty_for_io(page)) { int res; @@ -1578,9 +1565,7 @@ retry: * end of the LRU a second time. */ mapping = page_mapping(page); - if (((dirty || writeback) && mapping && - inode_write_congested(mapping->host)) || - (writeback && PageReclaim(page))) + if (writeback && PageReclaim(page)) stat->nr_congested++; /* -- cgit v1.2.3-71-gd317 From b9b1335e640308acc1b8f26c739b804c80a6c147 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 22 Mar 2022 14:39:10 -0700 Subject: remove bdi_congested() and wb_congested() and related functions These functions are no longer useful as no BDIs report congestion any more. Removing the test on bdi_write_congested() in current_may_throttle() could cause a small change in behaviour, but only when PF_LOCAL_THROTTLE is set. So replace the calls by 'false' and simplify the code - and remove the functions. [akpm@linux-foundation.org: fix build] Link: https://lkml.kernel.org/r/164549983742.9187.2570198746005819592.stgit@noble.brown Signed-off-by: NeilBrown Acked-by: Ryusuke Konishi [nilfs] Cc: Anna Schumaker Cc: Chao Yu Cc: Darrick J.
Wong Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jan Kara Cc: Jeff Layton Cc: Jens Axboe Cc: Lars Ellenberg Cc: Miklos Szeredi Cc: Paolo Valente Cc: Philipp Reisner Cc: Trond Myklebust Cc: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/drbd/drbd_int.h | 3 --- drivers/block/drbd/drbd_req.c | 3 +-- fs/ext2/ialloc.c | 5 ----- fs/nilfs2/segbuf.c | 16 ---------------- fs/xfs/xfs_buf.c | 3 --- include/linux/backing-dev.h | 26 -------------------------- mm/vmscan.c | 4 +--- 7 files changed, 2 insertions(+), 58 deletions(-) (limited to 'include/linux') diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index f27d5b0f9a0b..f804b1bfb3e6 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -638,9 +638,6 @@ enum { STATE_SENT, /* Do not change state/UUIDs while this is set */ CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC) * pending, from drbd worker context. - * If set, bdi_write_congested() returns true, - * so shrink_page_list() would not recurse into, - * and potentially deadlock on, this drbd worker. */ DISCONNECT_SENT, diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3235532ae077..2e5fb7e442e3 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -909,8 +909,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se switch (rbm) { case RB_CONGESTED_REMOTE: - return bdi_read_congested( - device->ldev->backing_bdev->bd_disk->bdi); + return 0; case RB_LEAST_PENDING: return atomic_read(&device->local_cnt) > atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt); diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index df14e750e9fe..998dd2ac8008 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -170,11 +170,6 @@ static void ext2_preread_inode(struct inode *inode) unsigned long offset; unsigned long block; struct ext2_group_desc * gdp; - struct backing_dev_info *bdi; - - bdi = inode_to_bdi(inode); - if (bdi_rw_congested(bdi)) - return; block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb); gdp = ext2_get_group_desc(inode->i_sb, block_group, NULL); diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 43287b0d3e9b..f4b57bc0c586 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -341,18 +341,6 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, int mode_flags) { struct bio *bio = wi->bio; - int err; - - if (segbuf->sb_nbio > 0 && - bdi_write_congested(segbuf->sb_super->s_bdi)) { - wait_for_completion(&segbuf->sb_bio_event); - segbuf->sb_nbio--; - if (unlikely(atomic_read(&segbuf->sb_err))) { - bio_put(bio); - err = -EIO; - goto failed; - } - } bio->bi_end_io = nilfs_end_bio_write; bio->bi_private = segbuf; @@ -365,10 +353,6 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, wi->nr_vecs = min(wi->max_pages, wi->rest_blocks); wi->start = wi->end; return 0; - - failed: - wi->bio = NULL; - return err; } /** diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index b45e0d50a405..b7ebcfe6b8d3 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -843,9 +843,6 @@ xfs_buf_readahead_map( { struct xfs_buf *bp; - if (bdi_read_congested(target->bt_bdev->bd_disk->bdi)) - return; - xfs_buf_read_map(target, map, nmaps, XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops, __this_address); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 860b675c2929..2d764566280c 100644 --- 
a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -135,11 +135,6 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb) struct backing_dev_info *inode_to_bdi(struct inode *inode); -static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) -{ - return wb->congested & cong_bits; -} - long congestion_wait(int sync, long timeout); static inline bool mapping_can_writeback(struct address_space *mapping) @@ -391,27 +386,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg) #endif /* CONFIG_CGROUP_WRITEBACK */ -static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits) -{ - return wb_congested(&bdi->wb, cong_bits); -} - -static inline int bdi_read_congested(struct backing_dev_info *bdi) -{ - return bdi_congested(bdi, 1 << WB_sync_congested); -} - -static inline int bdi_write_congested(struct backing_dev_info *bdi) -{ - return bdi_congested(bdi, 1 << WB_async_congested); -} - -static inline int bdi_rw_congested(struct backing_dev_info *bdi) -{ - return bdi_congested(bdi, (1 << WB_sync_congested) | - (1 << WB_async_congested)); -} - const char *bdi_dev_name(struct backing_dev_info *bdi); #endif /* _LINUX_BACKING_DEV_H */ diff --git a/mm/vmscan.c b/mm/vmscan.c index e38de6456cdc..5e1469887afa 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2364,9 +2364,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec, */ static int current_may_throttle(void) { - return !(current->flags & PF_LOCAL_THROTTLE) || - current->backing_dev_info == NULL || - bdi_write_congested(current->backing_dev_info); + return !(current->flags & PF_LOCAL_THROTTLE); } /* -- cgit v1.2.3-71-gd317 From a88f2096d5a2d91179db5dd9aa8f60dc3df9bb3e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 22 Mar 2022 14:39:19 -0700 Subject: remove congestion tracking framework This framework is no longer used - so discard it. Link: https://lkml.kernel.org/r/164549983747.9187.6171768583526866601.stgit@noble.brown Signed-off-by: NeilBrown Cc: Anna Schumaker Cc: Chao Yu Cc: Darrick J. 
Wong Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jan Kara Cc: Jeff Layton Cc: Jens Axboe Cc: Lars Ellenberg Cc: Miklos Szeredi Cc: Paolo Valente Cc: Philipp Reisner Cc: Ryusuke Konishi Cc: Trond Myklebust Cc: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/backing-dev-defs.h | 8 ------ include/linux/backing-dev.h | 2 -- include/trace/events/writeback.h | 28 -------------------- mm/backing-dev.c | 57 ---------------------------------------- 4 files changed, 95 deletions(-) (limited to 'include/linux') diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 993c5628a726..e863c88df95f 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -207,14 +207,6 @@ struct backing_dev_info { #endif }; -enum { - BLK_RW_ASYNC = 0, - BLK_RW_SYNC = 1, -}; - -void clear_bdi_congested(struct backing_dev_info *bdi, int sync); -void set_bdi_congested(struct backing_dev_info *bdi, int sync); - struct wb_lock_cookie { bool locked; unsigned long flags; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 2d764566280c..87ce24d238f3 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -135,8 +135,6 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb) struct backing_dev_info *inode_to_bdi(struct inode *inode); -long congestion_wait(int sync, long timeout); - static inline bool mapping_can_writeback(struct address_space *mapping) { return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK; diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index a345b1e12daf..86b2a82da546 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -735,34 +735,6 @@ TRACE_EVENT(writeback_sb_inodes_requeue, ) ); -DECLARE_EVENT_CLASS(writeback_congest_waited_template, - - TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), - - TP_ARGS(usec_timeout, usec_delayed), - - TP_STRUCT__entry( - __field( unsigned int, usec_timeout ) - __field( unsigned int, usec_delayed ) - ), - - TP_fast_assign( - __entry->usec_timeout = usec_timeout; - __entry->usec_delayed = usec_delayed; - ), - - TP_printk("usec_timeout=%u usec_delayed=%u", - __entry->usec_timeout, - __entry->usec_delayed) -); - -DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait, - - TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), - - TP_ARGS(usec_timeout, usec_delayed) -); - DECLARE_EVENT_CLASS(writeback_single_inode_template, TP_PROTO(struct inode *inode, diff --git a/mm/backing-dev.c b/mm/backing-dev.c index eae96dfe0261..7176af65b103 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -1005,60 +1005,3 @@ const char *bdi_dev_name(struct backing_dev_info *bdi) return bdi->dev_name; } EXPORT_SYMBOL_GPL(bdi_dev_name); - -static wait_queue_head_t congestion_wqh[2] = { - __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), - __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) - }; -static atomic_t nr_wb_congested[2]; - -void clear_bdi_congested(struct backing_dev_info *bdi, int sync) -{ - wait_queue_head_t *wqh = &congestion_wqh[sync]; - enum wb_congested_state bit; - - bit = sync ? 
WB_sync_congested : WB_async_congested; - if (test_and_clear_bit(bit, &bdi->wb.congested)) - atomic_dec(&nr_wb_congested[sync]); - smp_mb__after_atomic(); - if (waitqueue_active(wqh)) - wake_up(wqh); -} -EXPORT_SYMBOL(clear_bdi_congested); - -void set_bdi_congested(struct backing_dev_info *bdi, int sync) -{ - enum wb_congested_state bit; - - bit = sync ? WB_sync_congested : WB_async_congested; - if (!test_and_set_bit(bit, &bdi->wb.congested)) - atomic_inc(&nr_wb_congested[sync]); -} -EXPORT_SYMBOL(set_bdi_congested); - -/** - * congestion_wait - wait for a backing_dev to become uncongested - * @sync: SYNC or ASYNC IO - * @timeout: timeout in jiffies - * - * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit - * write congestion. If no backing_devs are congested then just wait for the - * next write to be completed. - */ -long congestion_wait(int sync, long timeout) -{ - long ret; - unsigned long start = jiffies; - DEFINE_WAIT(wait); - wait_queue_head_t *wqh = &congestion_wqh[sync]; - - prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); - ret = io_schedule_timeout(timeout); - finish_wait(wqh, &wait); - - trace_writeback_congestion_wait(jiffies_to_usecs(timeout), - jiffies_to_usecs(jiffies - start)); - - return ret; -} -EXPORT_SYMBOL(congestion_wait); -- cgit v1.2.3-71-gd317 From a128b054ce029554a4a52fc3abb8c1df8bafcaef Mon Sep 17 00:00:00 2001 From: Anthony Iliopoulos Date: Tue, 22 Mar 2022 14:39:22 -0700 Subject: mount: warn only once about timestamp range expiration Commit f8b92ba67c5d ("mount: Add mount warning for impending timestamp expiry") introduced a mount warning regarding filesystem timestamp limits, that is printed upon each writable mount or remount. This can result in a lot of unnecessary messages in the kernel log in setups where filesystems are being frequently remounted (or mounted multiple times). Avoid this by setting a superblock flag which indicates that the warning has been emitted at least once for any particular mount, as suggested in [1]. Link: https://lore.kernel.org/CAHk-=wim6VGnxQmjfK_tDg6fbHYKL4EFkmnTjVr9QnRqjDBAeA@mail.gmail.com/ [1] Link: https://lkml.kernel.org/r/20220119202934.26495-1-ailiop@suse.com Signed-off-by: Anthony Iliopoulos Reviewed-by: Christoph Hellwig Acked-by: Christian Brauner Reviewed-by: Darrick J. Wong Cc: Alexander Viro Cc: Deepa Dinamani Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/namespace.c | 2 ++ include/linux/fs.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/fs/namespace.c b/fs/namespace.c index de6fae84f1a1..0044feef59d0 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2597,6 +2597,7 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount * struct super_block *sb = mnt->mnt_sb; if (!__mnt_is_readonly(mnt) && + (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { char *buf = (char *)__get_free_page(GFP_KERNEL); char *mntpath = buf ? 
d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM); @@ -2611,6 +2612,7 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount * tm.tm_year+1900, (unsigned long long)sb->s_time_max); free_page((unsigned long)buf); + sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; } } diff --git a/include/linux/fs.h b/include/linux/fs.h index 8b5c486bd4a2..ca9445f6cf3d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1440,6 +1440,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ #define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ +#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */ /* Possible states of 'frozen' field */ enum { -- cgit v1.2.3-71-gd317 From eb5279fb7e41804ecc15ed3cf716a9e2b419af57 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 22 Mar 2022 14:39:28 -0700 Subject: filemap: remove find_get_pages() It's unused now. Remove it and clean up the relevant comment. Link: https://lkml.kernel.org/r/20220208134149.47299-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox (Oracle) Cc: David Howells Cc: William Kucharski Cc: Vlastimil Babka Cc: Kirill A. Shutemov Cc: Johannes Weiner Cc: Andreas Gruenbacher Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagemap.h | 7 ------- mm/filemap.c | 11 ++++++----- 2 files changed, 6 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 270bf5136c34..dc31eb981ea2 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -594,13 +594,6 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, pgoff_t end, unsigned int nr_pages, struct page **pages); -static inline unsigned find_get_pages(struct address_space *mapping, - pgoff_t *start, unsigned int nr_pages, - struct page **pages) -{ - return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages, - pages); -} unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, diff --git a/mm/filemap.c b/mm/filemap.c index ad8c39d90bf9..90afe301cd52 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2229,8 +2229,9 @@ out: * @nr_pages: The maximum number of pages * @pages: Where the resulting pages are placed * - * find_get_pages_contig() works exactly like find_get_pages(), except - * that the returned number of pages are guaranteed to be contiguous. + * find_get_pages_contig() works exactly like find_get_pages_range(), + * except that the returned number of pages are guaranteed to be + * contiguous. * * Return: the number of pages which were found. */ @@ -2290,9 +2291,9 @@ EXPORT_SYMBOL(find_get_pages_contig); * @nr_pages: the maximum number of pages * @pages: where the resulting pages are placed * - * Like find_get_pages(), except we only return head pages which are tagged - * with @tag. @index is updated to the index immediately after the last - * page we return, ready for the next iteration. + * Like find_get_pages_range(), except we only return head pages which are + * tagged with @tag. @index is updated to the index immediately after the + * last page we return, ready for the next iteration. * * Return: the number of pages which were found. 
*/ -- cgit v1.2.3-71-gd317 From ad6c441266dcd50be080a47e1178a1b15369923c Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Tue, 22 Mar 2022 14:39:43 -0700 Subject: mm/gup: remove unused pin_user_pages_locked() This routine was used for a short while, but then the calling code was refactored and the only caller was removed. Link: https://lkml.kernel.org/r/20220204020010.68930-4-jhubbard@nvidia.com Signed-off-by: John Hubbard Reviewed-by: David Hildenbrand Reviewed-by: Jason Gunthorpe Reviewed-by: Jan Kara Reviewed-by: Christoph Hellwig Reviewed-by: Claudio Imbrenda Cc: Alex Williamson Cc: Andrea Arcangeli Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lukas Bulwahn Cc: Matthew Wilcox (Oracle) Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 -- mm/gup.c | 29 ----------------------------- 2 files changed, 31 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 5744a3fc4716..d4a2b40066fa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1918,8 +1918,6 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages, struct vm_area_struct **vmas); long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); -long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, int *locked); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, diff --git a/mm/gup.c b/mm/gup.c index 43ad2bc6ca51..f8faa646f208 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -3127,32 +3127,3 @@ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); } EXPORT_SYMBOL(pin_user_pages_unlocked); - -/* - * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked(). - * Behavior is the same, except that this one sets FOLL_PIN and rejects - * FOLL_GET. - */ -long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - int *locked) -{ - /* - * FIXME: Current FOLL_LONGTERM behavior is incompatible with - * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on - * vmas. As there are no users of this flag in this call we simply - * disallow this option for now. - */ - if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM)) - return -EINVAL; - - /* FOLL_GET and FOLL_PIN are mutually exclusive. */ - if (WARN_ON_ONCE(gup_flags & FOLL_GET)) - return -EINVAL; - - gup_flags |= FOLL_PIN; - return __get_user_pages_locked(current->mm, start, nr_pages, - pages, NULL, locked, - gup_flags | FOLL_TOUCH); -} -EXPORT_SYMBOL(pin_user_pages_locked); -- cgit v1.2.3-71-gd317 From 73fd16d8080f7b1537ba7aa29917f64d6fffa664 Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Tue, 22 Mar 2022 14:39:50 -0700 Subject: mm/gup: remove unused get_user_pages_locked() Now that the last caller of get_user_pages_locked() is gone, remove it. Link: https://lkml.kernel.org/r/20220204020010.68930-6-jhubbard@nvidia.com Signed-off-by: John Hubbard Reviewed-by: Jan Kara Reviewed-by: Jason Gunthorpe Reviewed-by: Claudio Imbrenda Reviewed-by: Christoph Hellwig Cc: Alex Williamson Cc: Andrea Arcangeli Cc: David Hildenbrand Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lukas Bulwahn Cc: Matthew Wilcox (Oracle) Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 -- mm/gup.c | 59 ------------------------------------------------------ 2 files changed, 61 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index d4a2b40066fa..c02a8cc16e4f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1916,8 +1916,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); -long get_user_pages_locked(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, int *locked); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, diff --git a/mm/gup.c b/mm/gup.c index f8faa646f208..85d59dc08644 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2126,65 +2126,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, } EXPORT_SYMBOL(get_user_pages); -/** - * get_user_pages_locked() - variant of get_user_pages() - * - * @start: starting user address - * @nr_pages: number of pages from start to pin - * @gup_flags: flags modifying lookup behaviour - * @pages: array that receives pointers to the pages pinned. - * Should be at least nr_pages long. Or NULL, if caller - * only intends to ensure the pages are faulted in. - * @locked: pointer to lock flag indicating whether lock is held and - * subsequently whether VM_FAULT_RETRY functionality can be - * utilised. Lock must initially be held. - * - * It is suitable to replace the form: - * - * mmap_read_lock(mm); - * do_something() - * get_user_pages(mm, ..., pages, NULL); - * mmap_read_unlock(mm); - * - * to: - * - * int locked = 1; - * mmap_read_lock(mm); - * do_something() - * get_user_pages_locked(mm, ..., pages, &locked); - * if (locked) - * mmap_read_unlock(mm); - * - * We can leverage the VM_FAULT_RETRY functionality in the page fault - * paths better by using either get_user_pages_locked() or - * get_user_pages_unlocked(). - * - */ -long get_user_pages_locked(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - int *locked) -{ - /* - * FIXME: Current FOLL_LONGTERM behavior is incompatible with - * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on - * vmas. As there are no users of this flag in this call we simply - * disallow this option for now. - */ - if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM)) - return -EINVAL; - /* - * FOLL_PIN must only be set internally by the pin_user_pages*() APIs, - * never directly by the caller, so enforce that: - */ - if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) - return -EINVAL; - - return __get_user_pages_locked(current->mm, start, nr_pages, - pages, NULL, locked, - gup_flags | FOLL_TOUCH); -} -EXPORT_SYMBOL(get_user_pages_locked); - /* * get_user_pages_unlocked() is suitable to replace the form: * -- cgit v1.2.3-71-gd317 From f7cd16a55837f37b4c3835a2c646023e4d0f0e04 Mon Sep 17 00:00:00 2001 From: Xavier Roche Date: Tue, 22 Mar 2022 14:39:55 -0700 Subject: tmpfs: support for file creation time Various filesystems (including ext4) now support file creation time. This adds such support for tmpfs-based filesystems. 
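By way of illustration, creation time becomes visible to userspace through statx(2); the following is a minimal sketch, where the file path is an assumption and any tmpfs file would do:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
    	struct statx stx;
    	const char *path = argc > 1 ? argv[1] : "/dev/shm/example"; /* assumed tmpfs file */

    	if (statx(AT_FDCWD, path, 0, STATX_BTIME, &stx)) {
    		perror("statx");
    		return 1;
    	}
    	if (stx.stx_mask & STATX_BTIME)
    		printf("btime: %lld.%09u\n",
    		       (long long)stx.stx_btime.tv_sec, stx.stx_btime.tv_nsec);
    	else
    		puts("btime not reported for this file");
    	return 0;
    }

A filesystem signals support by setting STATX_BTIME in stx_mask, which is what the shmem_getattr() hunk below does.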
Note that using shmem_getattr() on file types other than regular requires shmem_is_huge() to check the inode type, to stop it reporting an incorrect HPAGE_PMD_SIZE blksize. [hughd@google.com: three tweaks to creation time patch] Link: https://lkml.kernel.org/r/b954973a-b8d1-cab8-63bd-6ea8063de3@google.com Link: https://lkml.kernel.org/r/20220314211150.GA123458@xavier-xps Link: https://lkml.kernel.org/r/b954973a-b8d1-cab8-63bd-6ea8063de3@google.com Link: https://lkml.kernel.org/r/20220211213628.GA1919658@xavier-xps Signed-off-by: Xavier Roche Signed-off-by: Hugh Dickins Tested-by: Jean Delvare Tested-by: Sylvain Bellone Reported-by: Xavier Grand Reviewed-by: Jean Delvare Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/shmem_fs.h | 1 + mm/shmem.c | 16 +++++++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index e65b80ed09e7..ab51d3cd39bd 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -24,6 +24,7 @@ struct shmem_inode_info { struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ atomic_t stop_eviction; /* hold when working on inode */ + struct timespec64 i_crtime; /* file creation time */ struct inode vfs_inode; }; diff --git a/mm/shmem.c b/mm/shmem.c index a09b29ec2b45..f8205b2b0322 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -476,6 +476,8 @@ bool shmem_is_huge(struct vm_area_struct *vma, { loff_t i_size; + if (!S_ISREG(inode->i_mode)) + return false; if (shmem_huge == SHMEM_HUGE_DENY) return false; if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) || @@ -1061,6 +1063,12 @@ static int shmem_getattr(struct user_namespace *mnt_userns, if (shmem_is_huge(NULL, inode, 0)) stat->blksize = HPAGE_PMD_SIZE; + if (request_mask & STATX_BTIME) { + stat->result_mask |= STATX_BTIME; + stat->btime.tv_sec = info->i_crtime.tv_sec; + stat->btime.tv_nsec = info->i_crtime.tv_nsec; + } + return 0; } @@ -1854,9 +1862,6 @@ repeat: return 0; } - /* Never use a huge page for shmem_symlink() */ - if (S_ISLNK(inode->i_mode)) - goto alloc_nohuge; if (!shmem_is_huge(vma, inode, index)) goto alloc_nohuge; @@ -2265,6 +2270,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode atomic_set(&info->stop_eviction, 0); info->seals = F_SEAL_SEAL; info->flags = flags & VM_NORESERVE; + info->i_crtime = inode->i_mtime; INIT_LIST_HEAD(&info->shrinklist); INIT_LIST_HEAD(&info->swaplist); simple_xattrs_init(&info->xattrs); @@ -3196,6 +3202,7 @@ static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) #endif /* CONFIG_TMPFS_XATTR */ static const struct inode_operations shmem_short_symlink_operations = { + .getattr = shmem_getattr, .get_link = simple_get_link, #ifdef CONFIG_TMPFS_XATTR .listxattr = shmem_listxattr, @@ -3203,6 +3210,7 @@ static const struct inode_operations shmem_short_symlink_operations = { }; static const struct inode_operations shmem_symlink_inode_operations = { + .getattr = shmem_getattr, .get_link = shmem_get_link, #ifdef CONFIG_TMPFS_XATTR .listxattr = shmem_listxattr, @@ -3790,6 +3798,7 @@ static const struct inode_operations shmem_inode_operations = { static const struct inode_operations shmem_dir_inode_operations = { #ifdef CONFIG_TMPFS + .getattr = shmem_getattr, .create = shmem_create, .lookup = simple_lookup, .link = shmem_link, @@ -3811,6 +3820,7 @@ static const struct inode_operations shmem_dir_inode_operations = { }; static const struct inode_operations
shmem_special_inode_operations = { + .getattr = shmem_getattr, #ifdef CONFIG_TMPFS_XATTR .listxattr = shmem_listxattr, #endif -- cgit v1.2.3-71-gd317 From a8c49af3be5f0b4e105ef678bcf14ef102c270be Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Tue, 22 Mar 2022 14:40:10 -0700 Subject: memcg: add per-memcg total kernel memory stat Currently memcg stats show several types of kernel memory: kernel stack, page tables, sock, vmalloc, and slab. However, there are other allocations with __GFP_ACCOUNT (or supersets such as GFP_KERNEL_ACCOUNT) that are not accounted in any of those stats, a few examples are: - various kvm allocations (e.g. allocated pages to create vcpus) - io_uring - tmp_page in pipes during pipe_write() - bpf ringbuffers - unix sockets Keeping track of the total kernel memory is essential for the ease of migration from cgroup v1 to v2 as there are large discrepancies between v1's kmem.usage_in_bytes and the sum of the available kernel memory stats in v2. Adding separate memcg stats for all __GFP_ACCOUNT kernel allocations is an impractical maintenance burden as there are a lot of those all over the kernel code, with more use cases likely to show up in the future. Therefore, add a "kernel" memcg stat that is analogous to kmem page counter, with added benefits such as using rstat infrastructure which aggregates stats more efficiently. Additionally, this provides a lighter alternative in case the legacy kmem is deprecated in the future. [yosryahmed@google.com: v2] Link: https://lkml.kernel.org/r/20220203193856.972500-1-yosryahmed@google.com Link: https://lkml.kernel.org/r/20220201200823.3283171-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed Acked-by: Shakeel Butt Acked-by: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/admin-guide/cgroup-v2.rst | 5 +++++ include/linux/memcontrol.h | 1 + mm/memcontrol.c | 27 +++++++++++++++++++++------ 3 files changed, 27 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 5aa368d165da..69d7a6983f78 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1301,6 +1301,11 @@ PAGE_SIZE multiple when read back. Amount of memory used to cache filesystem data, including tmpfs and shared memory. + kernel (npn) + Amount of total kernel memory, including + (kernel_stack, pagetables, percpu, vmalloc, slab) in + addition to other kernel memory use cases. + kernel_stack Amount of memory allocated to kernel stacks.
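To read the new "kernel" entry from userspace, scanning memory.stat is enough; a rough sketch follows, where the cgroup path is an assumption:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char line[256];
    	FILE *f = fopen("/sys/fs/cgroup/example/memory.stat", "r"); /* assumed path */

    	if (!f) {
    		perror("fopen");
    		return 1;
    	}
    	while (fgets(line, sizeof(line), f))
    		if (!strncmp(line, "kernel ", 7))
    			fputs(line, stdout);	/* e.g. "kernel 12345678" */
    	fclose(f);
    	return 0;
    }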
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0abbd685703b..8612d7dd0859 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -34,6 +34,7 @@ enum memcg_stat_item { MEMCG_SOCK, MEMCG_PERCPU_B, MEMCG_VMALLOC, + MEMCG_KMEM, MEMCG_NR_STAT, }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 209e66893da6..e64a276837b0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1371,6 +1371,7 @@ struct memory_stat { static const struct memory_stat memory_stats[] = { { "anon", NR_ANON_MAPPED }, { "file", NR_FILE_PAGES }, + { "kernel", MEMCG_KMEM }, { "kernel_stack", NR_KERNEL_STACK_KB }, { "pagetables", NR_PAGETABLE }, { "percpu", MEMCG_PERCPU_B }, @@ -2114,6 +2115,7 @@ static DEFINE_MUTEX(percpu_charge_mutex); static void drain_obj_stock(struct obj_stock *stock); static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, struct mem_cgroup *root_memcg); +static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages); #else static inline void drain_obj_stock(struct obj_stock *stock) @@ -2124,6 +2126,9 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, { return false; } +static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages) +{ +} #endif /** @@ -2979,6 +2984,18 @@ static void memcg_free_cache_id(int id) ida_simple_remove(&memcg_cache_ida, id); } +static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages) +{ + mod_memcg_state(memcg, MEMCG_KMEM, nr_pages); + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { + if (nr_pages > 0) + page_counter_charge(&memcg->kmem, nr_pages); + else + page_counter_uncharge(&memcg->kmem, -nr_pages); + } +} + + /* * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg * @objcg: object cgroup to uncharge @@ -2991,8 +3008,7 @@ static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, memcg = get_mem_cgroup_from_objcg(objcg); - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) - page_counter_uncharge(&memcg->kmem, nr_pages); + memcg_account_kmem(memcg, -nr_pages); refill_stock(memcg, nr_pages); css_put(&memcg->css); @@ -3018,8 +3034,7 @@ static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, if (ret) goto out; - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) - page_counter_charge(&memcg->kmem, nr_pages); + memcg_account_kmem(memcg, nr_pages); out: css_put(&memcg->css); @@ -6801,8 +6816,8 @@ static void uncharge_batch(const struct uncharge_gather *ug) page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); if (do_memsw_account()) page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) - page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); + if (ug->nr_kmem) + memcg_account_kmem(ug->memcg, -ug->nr_kmem); memcg_oom_recover(ug->memcg); } -- cgit v1.2.3-71-gd317 From 486bc7060cb510fa60cb85a013d5ed51ce0fe456 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Tue, 22 Mar 2022 14:40:16 -0700 Subject: mm/memcg: retrieve parent memcg from css.parent The parent we get from page_counter is correct, but page_counter and css are two different hierarchies. Let's retrieve the parent memcg from css.parent, just like parent_cs(), blkcg_parent(), etc.
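For context, parent_mem_cgroup() is typically used for ancestor walks, which keep working unchanged with the css.parent-based form; a brief sketch, where visit() is a hypothetical callback:

    /* Walk from a memcg up to the root, visiting every ancestor. */
    for (; memcg; memcg = parent_mem_cgroup(memcg))
    	visit(memcg);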
Link: https://lkml.kernel.org/r/20220201004643.8391-2-richard.weiyang@gmail.com Signed-off-by: Wei Yang Reviewed-by: Muchun Song Acked-by: Michal Hocko Reviewed-by: Roman Gushchin Reviewed-by: Shakeel Butt Cc: Roman Gushchin Cc: Johannes Weiner Cc: Vladimir Davydov Cc: Yang Shi Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Mike Rapoport Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 8612d7dd0859..ef4b445392a9 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -842,9 +842,7 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) */ static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { - if (!memcg->memory.parent) - return NULL; - return mem_cgroup_from_counter(memcg->memory.parent, memory); + return mem_cgroup_from_css(memcg->css.parent); } static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, -- cgit v1.2.3-71-gd317 From 6a6b7b77cc0fdc13f50c66c219c8c05500a8dfce Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:40:53 -0700 Subject: mm: list_lru: transpose the array of per-node per-memcg lru lists Patch series "Optimize list lru memory consumption", v6. In our server, we found a suspected memory leak problem. The kmalloc-32 consumes more than 6GB of memory. Other kmem_caches consume less than 2GB memory. After our in-depth analysis, the memory consumption of kmalloc-32 slab cache is the cause of list_lru_one allocation.

  crash> p memcg_nr_cache_ids
  memcg_nr_cache_ids = $2 = 24574

memcg_nr_cache_ids is very large and memory consumption of each list_lru can be calculated with the following formula.

  num_numa_node * memcg_nr_cache_ids * 32 (kmalloc-32)

There are 4 numa nodes in our system, so each list_lru consumes ~3MB.

  crash> list super_blocks | wc -l
  952

Every mount will register 2 list lrus, one is for inode, another is for dentry. There are 952 super_blocks. So the total memory is 952 * 2 * 3 MB (~5.6GB). But now the number of memory cgroups is less than 500. So I guess more than 12286 memory cgroups have been created on this machine (I do not know why there are so many cgroups, it may be a user's bug or the user really wants to do that). Because memcg_nr_cache_ids has not been reduced to a suitable value, a lot of memory is wasted. If we want to reduce memcg_nr_cache_ids, we have to *reboot* the server. This is not what we want. In order to reduce memcg_nr_cache_ids, I had posted a patchset [1] to do this. But this did not fundamentally solve the problem. We currently allocate scope for every memcg to be tracked on every superblock instantiated in the system, regardless of whether that superblock is even accessible to that memcg. These huge memcg counts come from container hosts where memcgs are confined to just a small subset of the total number of superblocks that are instantiated at any given point in time. For these systems with huge container counts, list_lru does not need the capability of tracking every memcg on every superblock. What it comes down to is that the list_lru is only needed for a given memcg if that memcg is instantiating and freeing objects on a given list_lru. As Dave said, "Which makes me think we should be moving more towards 'add the memcg to the list_lru at the first insert' model rather than 'instantiate all at memcg init time just in case'."
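As a worked check of the numbers quoted above (no new measurements, just the stated formula applied to the reported values):

  4 nodes * 24574 ids * 32 bytes          =  3,145,472 bytes  ~= 3 MB per list_lru
  952 super_blocks * 2 list_lrus * ~3 MB  ~= 5.6 GB in total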
This patchset aims to optimize the list lru memory consumption from different aspects. I did a simple test to show the optimization. I created 10k memory cgroups and mounted 10k filesystems in the system. We used the free command to show how much memory the system consumes after this operation (there are 2 numa nodes in the system).

  +-----------------------+------------------------+
  | condition             | memory consumption     |
  +-----------------------+------------------------+
  | without this patchset | 24464 MB               |
  +-----------------------+------------------------+
  | after patch 1         | 21957 MB               | <--------+
  +-----------------------+------------------------+          |
  | after patch 10        | 6895 MB                |          |
  +-----------------------+------------------------+          |
  | after patch 12        | 4367 MB                |          |
  +-----------------------+------------------------+          |
                                                              |
    The more the number of nodes, the more obvious the effect---+

BTW, there was a recent discussion [2] on the same issue. [1] https://lore.kernel.org/all/20210428094949.43579-1-songmuchun@bytedance.com/ [2] https://lore.kernel.org/all/20210405054848.GA1077931@in.ibm.com/ This series not only optimizes the memory usage of list_lru but also simplifies the code. This patch (of 16): The current scheme of maintaining per-node per-memcg lru lists looks like:

  struct list_lru {
      struct list_lru_node *node;               (for each node)
          struct list_lru_memcg *memcg_lrus;
              struct list_lru_one *lru[];       (for each memcg)
  }

By effectively transposing the two-dimensional array of list_lru_one's structures (per-node per-memcg => per-memcg per-node) it's possible to save some memory and simplify alloc/dealloc paths. The new scheme looks like:

  struct list_lru {
      struct list_lru_memcg *mlrus;
          struct list_lru_per_memcg *mlru[];    (for each memcg)
              struct list_lru_one node[0];      (for each node)
  }

Memory savings come not only from 'struct rcu_head' but also from some pointer arrays used to store the pointer to 'struct list_lru_one'. The array is per node and its size is 8 (a pointer) * num_memcgs. So the total size of the arrays is 8 * num_nodes * memcg_nr_cache_ids. After this patch, the size becomes 8 * memcg_nr_cache_ids.
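Applying the before/after formulas to the 4-node server and the memcg_nr_cache_ids value of 24574 quoted at the start of the series (a worked example, assuming 8-byte pointers):

  before: 8 * 4 * 24574 = 786,368 bytes  (~768 KB of pointer arrays per list_lru)
  after:  8 * 24574     = 196,592 bytes  (~192 KB per list_lru)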
Link: https://lkml.kernel.org/r/20220228122126.37293-1-songmuchun@bytedance.com Link: https://lkml.kernel.org/r/20220228122126.37293-2-songmuchun@bytedance.com Signed-off-by: Muchun Song Acked-by: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Vladimir Davydov Cc: Shakeel Butt Cc: Yang Shi Cc: Alex Shi Cc: Wei Yang Cc: Dave Chinner Cc: Trond Myklebust Cc: Anna Schumaker Cc: Jaegeuk Kim Cc: Chao Yu Cc: Kari Argillander Cc: Vlastimil Babka Cc: Qi Zheng Cc: Xiongchun Duan Cc: Fam Zheng Cc: Roman Gushchin Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/list_lru.h | 17 ++-- mm/list_lru.c | 206 +++++++++++++++++------------------------------ 2 files changed, 86 insertions(+), 137 deletions(-) (limited to 'include/linux') diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 1b5fceb565df..729a27b6ff53 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -31,10 +31,15 @@ struct list_lru_one { long nr_items; }; +struct list_lru_per_memcg { + /* array of per cgroup per node lists, indexed by node id */ + struct list_lru_one node[0]; +}; + struct list_lru_memcg { - struct rcu_head rcu; + struct rcu_head rcu; /* array of per cgroup lists, indexed by memcg_cache_id */ - struct list_lru_one *lru[]; + struct list_lru_per_memcg *mlru[]; }; struct list_lru_node { @@ -42,11 +47,7 @@ struct list_lru_node { spinlock_t lock; /* global list, used for the root cgroup in cgroup aware lrus */ struct list_lru_one lru; -#ifdef CONFIG_MEMCG_KMEM - /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ - struct list_lru_memcg __rcu *memcg_lrus; -#endif - long nr_items; + long nr_items; } ____cacheline_aligned_in_smp; struct list_lru { @@ -55,6 +56,8 @@ struct list_lru { struct list_head list; int shrinker_id; bool memcg_aware; + /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ + struct list_lru_memcg __rcu *mlrus; #endif }; diff --git a/mm/list_lru.c b/mm/list_lru.c index 0cd5e89ca063..7d1356241aa8 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -49,35 +49,37 @@ static int lru_shrinker_id(struct list_lru *lru) } static inline struct list_lru_one * -list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) +list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) { - struct list_lru_memcg *memcg_lrus; + struct list_lru_memcg *mlrus; + struct list_lru_node *nlru = &lru->node[nid]; + /* * Either lock or RCU protects the array of per cgroup lists - * from relocation (see memcg_update_list_lru_node). + * from relocation (see memcg_update_list_lru). 
*/ - memcg_lrus = rcu_dereference_check(nlru->memcg_lrus, - lockdep_is_held(&nlru->lock)); - if (memcg_lrus && idx >= 0) - return memcg_lrus->lru[idx]; + mlrus = rcu_dereference_check(lru->mlrus, lockdep_is_held(&nlru->lock)); + if (mlrus && idx >= 0) + return &mlrus->mlru[idx]->node[nid]; return &nlru->lru; } static inline struct list_lru_one * -list_lru_from_kmem(struct list_lru_node *nlru, void *ptr, +list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr, struct mem_cgroup **memcg_ptr) { + struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l = &nlru->lru; struct mem_cgroup *memcg = NULL; - if (!nlru->memcg_lrus) + if (!lru->mlrus) goto out; memcg = mem_cgroup_from_obj(ptr); if (!memcg) goto out; - l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); + l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg)); out: if (memcg_ptr) *memcg_ptr = memcg; @@ -103,18 +105,18 @@ static inline bool list_lru_memcg_aware(struct list_lru *lru) } static inline struct list_lru_one * -list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) +list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) { - return &nlru->lru; + return &lru->node[nid].lru; } static inline struct list_lru_one * -list_lru_from_kmem(struct list_lru_node *nlru, void *ptr, +list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr, struct mem_cgroup **memcg_ptr) { if (memcg_ptr) *memcg_ptr = NULL; - return &nlru->lru; + return &lru->node[nid].lru; } #endif /* CONFIG_MEMCG_KMEM */ @@ -127,7 +129,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) spin_lock(&nlru->lock); if (list_empty(item)) { - l = list_lru_from_kmem(nlru, item, &memcg); + l = list_lru_from_kmem(lru, nid, item, &memcg); list_add_tail(item, &l->list); /* Set shrinker bit if the first element was added */ if (!l->nr_items++) @@ -150,7 +152,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item) spin_lock(&nlru->lock); if (!list_empty(item)) { - l = list_lru_from_kmem(nlru, item, NULL); + l = list_lru_from_kmem(lru, nid, item, NULL); list_del_init(item); l->nr_items--; nlru->nr_items--; @@ -180,12 +182,11 @@ EXPORT_SYMBOL_GPL(list_lru_isolate_move); unsigned long list_lru_count_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg) { - struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; long count; rcu_read_lock(); - l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); + l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg)); count = READ_ONCE(l->nr_items); rcu_read_unlock(); @@ -206,16 +207,16 @@ unsigned long list_lru_count_node(struct list_lru *lru, int nid) EXPORT_SYMBOL_GPL(list_lru_count_node); static unsigned long -__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx, +__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { - + struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; struct list_head *item, *n; unsigned long isolated = 0; - l = list_lru_from_memcg_idx(nlru, memcg_idx); + l = list_lru_from_memcg_idx(lru, nid, memcg_idx); restart: list_for_each_safe(item, n, &l->list) { enum lru_status ret; @@ -272,8 +273,8 @@ list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, unsigned long ret; spin_lock(&nlru->lock); - ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg, - nr_to_walk); + ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate, + cb_arg, nr_to_walk); spin_unlock(&nlru->lock); return 
ret; } @@ -288,8 +289,8 @@ list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg, unsigned long ret; spin_lock_irq(&nlru->lock); - ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg, - nr_to_walk); + ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate, + cb_arg, nr_to_walk); spin_unlock_irq(&nlru->lock); return ret; } @@ -308,7 +309,7 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid, struct list_lru_node *nlru = &lru->node[nid]; spin_lock(&nlru->lock); - isolated += __list_lru_walk_one(nlru, memcg_idx, + isolated += __list_lru_walk_one(lru, nid, memcg_idx, isolate, cb_arg, nr_to_walk); spin_unlock(&nlru->lock); @@ -328,166 +329,111 @@ static void init_one_lru(struct list_lru_one *l) } #ifdef CONFIG_MEMCG_KMEM -static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus, - int begin, int end) +static void memcg_destroy_list_lru_range(struct list_lru_memcg *mlrus, + int begin, int end) { int i; for (i = begin; i < end; i++) - kfree(memcg_lrus->lru[i]); + kfree(mlrus->mlru[i]); } -static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus, - int begin, int end) +static int memcg_init_list_lru_range(struct list_lru_memcg *mlrus, + int begin, int end) { int i; for (i = begin; i < end; i++) { - struct list_lru_one *l; + int nid; + struct list_lru_per_memcg *mlru; - l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL); - if (!l) + mlru = kmalloc(struct_size(mlru, node, nr_node_ids), GFP_KERNEL); + if (!mlru) goto fail; - init_one_lru(l); - memcg_lrus->lru[i] = l; + for_each_node(nid) + init_one_lru(&mlru->node[nid]); + mlrus->mlru[i] = mlru; } return 0; fail: - __memcg_destroy_list_lru_node(memcg_lrus, begin, i); + memcg_destroy_list_lru_range(mlrus, begin, i); return -ENOMEM; } -static int memcg_init_list_lru_node(struct list_lru_node *nlru) +static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { - struct list_lru_memcg *memcg_lrus; + struct list_lru_memcg *mlrus; int size = memcg_nr_cache_ids; - memcg_lrus = kvmalloc(struct_size(memcg_lrus, lru, size), GFP_KERNEL); - if (!memcg_lrus) + lru->memcg_aware = memcg_aware; + if (!memcg_aware) + return 0; + + mlrus = kvmalloc(struct_size(mlrus, mlru, size), GFP_KERNEL); + if (!mlrus) return -ENOMEM; - if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) { - kvfree(memcg_lrus); + if (memcg_init_list_lru_range(mlrus, 0, size)) { + kvfree(mlrus); return -ENOMEM; } - RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus); + RCU_INIT_POINTER(lru->mlrus, mlrus); return 0; } -static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) +static void memcg_destroy_list_lru(struct list_lru *lru) { - struct list_lru_memcg *memcg_lrus; + struct list_lru_memcg *mlrus; + + if (!list_lru_memcg_aware(lru)) + return; + /* * This is called when shrinker has already been unregistered, * and nobody can use it. So, there is no need to use kvfree_rcu(). 
*/ - memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true); - __memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids); - kvfree(memcg_lrus); + mlrus = rcu_dereference_protected(lru->mlrus, true); + memcg_destroy_list_lru_range(mlrus, 0, memcg_nr_cache_ids); + kvfree(mlrus); } -static int memcg_update_list_lru_node(struct list_lru_node *nlru, - int old_size, int new_size) +static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_size) { struct list_lru_memcg *old, *new; BUG_ON(old_size > new_size); - old = rcu_dereference_protected(nlru->memcg_lrus, + old = rcu_dereference_protected(lru->mlrus, lockdep_is_held(&list_lrus_mutex)); - new = kvmalloc(struct_size(new, lru, new_size), GFP_KERNEL); + new = kvmalloc(struct_size(new, mlru, new_size), GFP_KERNEL); if (!new) return -ENOMEM; - if (__memcg_init_list_lru_node(new, old_size, new_size)) { + if (memcg_init_list_lru_range(new, old_size, new_size)) { kvfree(new); return -ENOMEM; } - memcpy(&new->lru, &old->lru, flex_array_size(new, lru, old_size)); - rcu_assign_pointer(nlru->memcg_lrus, new); + memcpy(&new->mlru, &old->mlru, flex_array_size(new, mlru, old_size)); + rcu_assign_pointer(lru->mlrus, new); kvfree_rcu(old, rcu); return 0; } -static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru, - int old_size, int new_size) -{ - struct list_lru_memcg *memcg_lrus; - - memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, - lockdep_is_held(&list_lrus_mutex)); - /* do not bother shrinking the array back to the old size, because we - * cannot handle allocation failures here */ - __memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size); -} - -static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) -{ - int i; - - lru->memcg_aware = memcg_aware; - - if (!memcg_aware) - return 0; - - for_each_node(i) { - if (memcg_init_list_lru_node(&lru->node[i])) - goto fail; - } - return 0; -fail: - for (i = i - 1; i >= 0; i--) { - if (!lru->node[i].memcg_lrus) - continue; - memcg_destroy_list_lru_node(&lru->node[i]); - } - return -ENOMEM; -} - -static void memcg_destroy_list_lru(struct list_lru *lru) -{ - int i; - - if (!list_lru_memcg_aware(lru)) - return; - - for_each_node(i) - memcg_destroy_list_lru_node(&lru->node[i]); -} - -static int memcg_update_list_lru(struct list_lru *lru, - int old_size, int new_size) -{ - int i; - - for_each_node(i) { - if (memcg_update_list_lru_node(&lru->node[i], - old_size, new_size)) - goto fail; - } - return 0; -fail: - for (i = i - 1; i >= 0; i--) { - if (!lru->node[i].memcg_lrus) - continue; - - memcg_cancel_update_list_lru_node(&lru->node[i], - old_size, new_size); - } - return -ENOMEM; -} - static void memcg_cancel_update_list_lru(struct list_lru *lru, int old_size, int new_size) { - int i; + struct list_lru_memcg *mlrus; - for_each_node(i) - memcg_cancel_update_list_lru_node(&lru->node[i], - old_size, new_size); + mlrus = rcu_dereference_protected(lru->mlrus, + lockdep_is_held(&list_lrus_mutex)); + /* + * Do not bother shrinking the array back to the old size, because we + * cannot handle allocation failures here. 
+ */ + memcg_destroy_list_lru_range(mlrus, old_size, new_size); } int memcg_update_all_list_lrus(int new_size) @@ -524,8 +470,8 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, */ spin_lock_irq(&nlru->lock); - src = list_lru_from_memcg_idx(nlru, src_idx); - dst = list_lru_from_memcg_idx(nlru, dst_idx); + src = list_lru_from_memcg_idx(lru, nid, src_idx); + dst = list_lru_from_memcg_idx(lru, nid, dst_idx); list_splice_init(&src->list, &dst->list); -- cgit v1.2.3-71-gd317 From 88f2ef73fd66491a2f9a82373d22ca6540f23c62 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:40:56 -0700 Subject: mm: introduce kmem_cache_alloc_lru

We currently allocate scope for every memcg to be able to be tracked on every superblock instantiated in the system, regardless of whether that superblock is even accessible to that memcg. These huge memcg counts come from container hosts where memcgs are confined to just a small subset of the total number of superblocks that are instantiated at any given point in time. For these systems with huge container counts, list_lru does not need the capability of tracking every memcg on every superblock. What it comes down to is that the memcg needs to be added to the list_lru only at the first insert. So introduce kmem_cache_alloc_lru to allocate an object and its list_lru. In later patches, we will convert all inode and dentry allocations from kmem_cache_alloc to kmem_cache_alloc_lru.

Link: https://lkml.kernel.org/r/20220228122126.37293-3-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alex Shi Cc: Anna Schumaker Cc: Chao Yu Cc: Dave Chinner Cc: Fam Zheng Cc: Jaegeuk Kim Cc: Johannes Weiner Cc: Kari Argillander Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Qi Zheng Cc: Roman Gushchin Cc: Shakeel Butt Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Vladimir Davydov Cc: Vlastimil Babka Cc: Wei Yang Cc: Xiongchun Duan Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/list_lru.h | 4 ++ include/linux/memcontrol.h | 14 ++++++ include/linux/slab.h | 3 ++ mm/list_lru.c | 104 +++++++++++++++++++++++++++++++++++++++++---- mm/memcontrol.c | 14 ------ mm/slab.c | 39 +++++++++++------ mm/slab.h | 25 +++++++++-- mm/slob.c | 6 +++ mm/slub.c | 42 ++++++++++++------ 9 files changed, 198 insertions(+), 53 deletions(-) (limited to 'include/linux') diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 729a27b6ff53..ab912c49334f 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -56,6 +56,8 @@ struct list_lru { struct list_head list; int shrinker_id; bool memcg_aware; + /* protects ->mlrus->mlru[i] */ + spinlock_t lock; /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ struct list_lru_memcg __rcu *mlrus; #endif @@ -72,6 +74,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, #define list_lru_init_memcg(lru, shrinker) \ __list_lru_init((lru), true, NULL, shrinker) +int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, + gfp_t gfp); int memcg_update_all_list_lrus(int num_memcgs); void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ef4b445392a9..b636732f0c14 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -524,6 +524,20 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page) return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); } +static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct
obj_cgroup *objcg) +{ + struct mem_cgroup *memcg; + + rcu_read_lock(); +retry: + memcg = obj_cgroup_memcg(objcg); + if (unlikely(!css_tryget(&memcg->css))) + goto retry; + rcu_read_unlock(); + + return memcg; +} + #ifdef CONFIG_MEMCG_KMEM /* * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set. diff --git a/include/linux/slab.h b/include/linux/slab.h index 5b6193fd8bd9..e6addaf91afd 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -135,6 +135,7 @@ #include +struct list_lru; struct mem_cgroup; /* * struct kmem_cache related prototypes @@ -416,6 +417,8 @@ static __always_inline unsigned int __kmalloc_index(size_t size, void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc; +void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, + gfp_t gfpflags) __assume_slab_alignment __malloc; void kmem_cache_free(struct kmem_cache *s, void *objp); /* diff --git a/mm/list_lru.c b/mm/list_lru.c index 7d1356241aa8..bffa80527723 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -13,6 +13,7 @@ #include #include #include "slab.h" +#include "internal.h" #ifdef CONFIG_MEMCG_KMEM static LIST_HEAD(memcg_list_lrus); @@ -338,22 +339,30 @@ static void memcg_destroy_list_lru_range(struct list_lru_memcg *mlrus, kfree(mlrus->mlru[i]); } +static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp) +{ + int nid; + struct list_lru_per_memcg *mlru; + + mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp); + if (!mlru) + return NULL; + + for_each_node(nid) + init_one_lru(&mlru->node[nid]); + + return mlru; +} + static int memcg_init_list_lru_range(struct list_lru_memcg *mlrus, int begin, int end) { int i; for (i = begin; i < end; i++) { - int nid; - struct list_lru_per_memcg *mlru; - - mlru = kmalloc(struct_size(mlru, node, nr_node_ids), GFP_KERNEL); - if (!mlru) + mlrus->mlru[i] = memcg_init_list_lru_one(GFP_KERNEL); + if (!mlrus->mlru[i]) goto fail; - - for_each_node(nid) - init_one_lru(&mlru->node[nid]); - mlrus->mlru[i] = mlru; } return 0; fail: @@ -370,6 +379,8 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) if (!memcg_aware) return 0; + spin_lock_init(&lru->lock); + mlrus = kvmalloc(struct_size(mlrus, mlru, size), GFP_KERNEL); if (!mlrus) return -ENOMEM; @@ -416,8 +427,11 @@ static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_siz return -ENOMEM; } + spin_lock_irq(&lru->lock); memcpy(&new->mlru, &old->mlru, flex_array_size(new, mlru, old_size)); rcu_assign_pointer(lru->mlrus, new); + spin_unlock_irq(&lru->lock); + kvfree_rcu(old, rcu); return 0; } @@ -502,6 +516,78 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg) memcg_drain_list_lru(lru, src_idx, dst_memcg); mutex_unlock(&list_lrus_mutex); } + +static bool memcg_list_lru_allocated(struct mem_cgroup *memcg, + struct list_lru *lru) +{ + bool allocated; + int idx; + + idx = memcg->kmemcg_id; + if (unlikely(idx < 0)) + return true; + + rcu_read_lock(); + allocated = !!rcu_dereference(lru->mlrus)->mlru[idx]; + rcu_read_unlock(); + + return allocated; +} + +int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, + gfp_t gfp) +{ + int i; + unsigned long flags; + struct list_lru_memcg *mlrus; + struct list_lru_memcg_table { + struct list_lru_per_memcg *mlru; + struct mem_cgroup *memcg; + } *table; + + if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru)) + return 0; + + gfp &= 
GFP_RECLAIM_MASK; + table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp); + if (!table) + return -ENOMEM; + + /* + * Because the list_lru can be reparented to the parent cgroup's + * list_lru, we should make sure that this cgroup and all its + * ancestors have allocated list_lru_per_memcg. + */ + for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) { + if (memcg_list_lru_allocated(memcg, lru)) + break; + + table[i].memcg = memcg; + table[i].mlru = memcg_init_list_lru_one(gfp); + if (!table[i].mlru) { + while (i--) + kfree(table[i].mlru); + kfree(table); + return -ENOMEM; + } + } + + spin_lock_irqsave(&lru->lock, flags); + mlrus = rcu_dereference_protected(lru->mlrus, true); + while (i--) { + int index = table[i].memcg->kmemcg_id; + + if (mlrus->mlru[index]) + kfree(table[i].mlru); + else + mlrus->mlru[index] = table[i].mlru; + } + spin_unlock_irqrestore(&lru->lock, flags); + + kfree(table); + + return 0; +} #else static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 85a259515e91..52835528eb2a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2805,20 +2805,6 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) folio->memcg_data = (unsigned long)memcg; } -static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) -{ - struct mem_cgroup *memcg; - - rcu_read_lock(); -retry: - memcg = obj_cgroup_memcg(objcg); - if (unlikely(!css_tryget(&memcg->css))) - goto retry; - rcu_read_unlock(); - - return memcg; -} - #ifdef CONFIG_MEMCG_KMEM /* * The allocated objcg pointers array is not accounted directly. diff --git a/mm/slab.c b/mm/slab.c index ddf5737c63d9..d9dec7a8fd79 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3211,7 +3211,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_ bool init = false; flags &= gfp_allowed_mask; - cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags); + cachep = slab_pre_alloc_hook(cachep, NULL, &objcg, 1, flags); if (unlikely(!cachep)) return NULL; @@ -3287,7 +3287,8 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) #endif /* CONFIG_NUMA */ static __always_inline void * -slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller) +slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, + size_t orig_size, unsigned long caller) { unsigned long save_flags; void *objp; @@ -3295,7 +3296,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned lo bool init = false; flags &= gfp_allowed_mask; - cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags); + cachep = slab_pre_alloc_hook(cachep, lru, &objcg, 1, flags); if (unlikely(!cachep)) return NULL; @@ -3484,6 +3485,18 @@ void ___cache_free(struct kmem_cache *cachep, void *objp, __free_one(ac, objp); } +static __always_inline +void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, + gfp_t flags) +{ + void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_); + + trace_kmem_cache_alloc(_RET_IP_, ret, + cachep->object_size, cachep->size, flags); + + return ret; +} + /** * kmem_cache_alloc - Allocate an object * @cachep: The cache to allocate from. 
@@ -3496,15 +3509,17 @@ void ___cache_free(struct kmem_cache *cachep, void *objp, */ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) { - void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_); - - trace_kmem_cache_alloc(_RET_IP_, ret, - cachep->object_size, cachep->size, flags); - - return ret; + return __kmem_cache_alloc_lru(cachep, NULL, flags); } EXPORT_SYMBOL(kmem_cache_alloc); +void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, + gfp_t flags) +{ + return __kmem_cache_alloc_lru(cachep, lru, flags); +} +EXPORT_SYMBOL(kmem_cache_alloc_lru); + static __always_inline void cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p, unsigned long caller) @@ -3521,7 +3536,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, size_t i; struct obj_cgroup *objcg = NULL; - s = slab_pre_alloc_hook(s, &objcg, size, flags); + s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags); if (!s) return 0; @@ -3562,7 +3577,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) { void *ret; - ret = slab_alloc(cachep, flags, size, _RET_IP_); + ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_); ret = kasan_kmalloc(cachep, ret, size, flags); trace_kmalloc(_RET_IP_, ret, @@ -3689,7 +3704,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, cachep = kmalloc_slab(size, flags); if (unlikely(ZERO_OR_NULL_PTR(cachep))) return cachep; - ret = slab_alloc(cachep, flags, size, caller); + ret = slab_alloc(cachep, NULL, flags, size, caller); ret = kasan_kmalloc(cachep, ret, size, flags); trace_kmalloc(caller, ret, diff --git a/mm/slab.h b/mm/slab.h index c7f2abc2b154..fd7ae2024897 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -231,6 +231,7 @@ struct kmem_cache { #include #include #include +#include /* * State of the slab allocator. @@ -472,6 +473,7 @@ static inline size_t obj_full_size(struct kmem_cache *s) * Returns false if the allocation should fail. 
*/ static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, + struct list_lru *lru, struct obj_cgroup **objcgp, size_t objects, gfp_t flags) { @@ -487,13 +489,26 @@ static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, if (!objcg) return true; - if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) { - obj_cgroup_put(objcg); - return false; + if (lru) { + int ret; + struct mem_cgroup *memcg; + + memcg = get_mem_cgroup_from_objcg(objcg); + ret = memcg_list_lru_alloc(memcg, lru, flags); + css_put(&memcg->css); + + if (ret) + goto out; } + if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) + goto out; + *objcgp = objcg; return true; +out: + obj_cgroup_put(objcg); + return false; } static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, @@ -598,6 +613,7 @@ static inline void memcg_free_slab_cgroups(struct slab *slab) } static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, + struct list_lru *lru, struct obj_cgroup **objcgp, size_t objects, gfp_t flags) { @@ -697,6 +713,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s) } static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, + struct list_lru *lru, struct obj_cgroup **objcgp, size_t size, gfp_t flags) { @@ -707,7 +724,7 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, if (should_failslab(s, flags)) return NULL; - if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags)) + if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags)) return NULL; return s; diff --git a/mm/slob.c b/mm/slob.c index 60c5842215f1..8a8795520361 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -635,6 +635,12 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) } EXPORT_SYMBOL(kmem_cache_alloc); + +void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags) +{ + return slob_alloc_node(cachep, flags, NUMA_NO_NODE); +} +EXPORT_SYMBOL(kmem_cache_alloc_lru); #ifdef CONFIG_NUMA void *__kmalloc_node(size_t size, gfp_t gfp, int node) { diff --git a/mm/slub.c b/mm/slub.c index 261474092e43..07cdd999c3fe 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3131,7 +3131,7 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, * * Otherwise we can simply pick the next object from the lockless free list. 
*/ -static __always_inline void *slab_alloc_node(struct kmem_cache *s, +static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) { void *object; @@ -3141,7 +3141,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct obj_cgroup *objcg = NULL; bool init = false; - s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); + s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags); if (!s) return NULL; @@ -3232,27 +3232,41 @@ out: return object; } -static __always_inline void *slab_alloc(struct kmem_cache *s, +static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru, gfp_t gfpflags, unsigned long addr, size_t orig_size) { - return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size); + return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size); } -void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) +static __always_inline +void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, + gfp_t gfpflags) { - void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size); + void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size); trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); return ret; } + +void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) +{ + return __kmem_cache_alloc_lru(s, NULL, gfpflags); +} EXPORT_SYMBOL(kmem_cache_alloc); +void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, + gfp_t gfpflags) +{ + return __kmem_cache_alloc_lru(s, lru, gfpflags); +} +EXPORT_SYMBOL(kmem_cache_alloc_lru); + #ifdef CONFIG_TRACING void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) { - void *ret = slab_alloc(s, gfpflags, _RET_IP_, size); + void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size); trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); ret = kasan_kmalloc(s, ret, size, gfpflags); return ret; @@ -3263,7 +3277,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace); #ifdef CONFIG_NUMA void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) { - void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size); + void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); trace_kmem_cache_alloc_node(_RET_IP_, ret, s->object_size, s->size, gfpflags, node); @@ -3277,7 +3291,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) { - void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size); + void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); trace_kmalloc_node(_RET_IP_, ret, size, s->size, gfpflags, node); @@ -3667,7 +3681,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, struct obj_cgroup *objcg = NULL; /* memcg and kmem_cache debug support */ - s = slab_pre_alloc_hook(s, &objcg, size, flags); + s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags); if (unlikely(!s)) return false; /* @@ -4417,7 +4431,7 @@ void *__kmalloc(size_t size, gfp_t flags) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc(s, flags, _RET_IP_, size); + ret = slab_alloc(s, NULL, flags, _RET_IP_, size); trace_kmalloc(_RET_IP_, ret, size, s->size, flags); @@ -4465,7 +4479,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc_node(s, flags, node, _RET_IP_, size); + ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size); trace_kmalloc_node(_RET_IP_, 
ret, size, s->size, flags, node); @@ -4923,7 +4937,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc(s, gfpflags, caller, size); + ret = slab_alloc(s, NULL, gfpflags, caller, size); /* Honor the call site pointer we received. */ trace_kmalloc(caller, ret, size, s->size, gfpflags); @@ -4954,7 +4968,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc_node(s, gfpflags, node, caller, size); + ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size); /* Honor the call site pointer we received. */ trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); -- cgit v1.2.3-71-gd317 From 8b9f3ac5b01db85c6cf74c2c3a71280cc3045c9c Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:41:00 -0700 Subject: fs: introduce alloc_inode_sb() to allocate filesystems specific inode

The allocated inode is supposed to be added to its memcg list_lru, which should be allocated as well in advance. That can be done by kmem_cache_alloc_lru(), which allocates the object and the list_lru. File systems are the main users of it. So introduce alloc_inode_sb() to allocate file system specific inodes and set up the inode reclaim context properly. The file system is supposed to use alloc_inode_sb() to allocate inodes. In later patches, we will convert all users to the new API.

Link: https://lkml.kernel.org/r/20220228122126.37293-4-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Roman Gushchin Cc: Alex Shi Cc: Anna Schumaker Cc: Chao Yu Cc: Dave Chinner Cc: Fam Zheng Cc: Jaegeuk Kim Cc: Johannes Weiner Cc: Kari Argillander Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Qi Zheng Cc: Shakeel Butt Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Vladimir Davydov Cc: Vlastimil Babka Cc: Wei Yang Cc: Xiongchun Duan Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/porting.rst | 6 ++++++ fs/inode.c | 2 +- include/linux/fs.h | 11 +++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst index bf19fd6b86e7..7c1583dbeb59 100644 --- a/Documentation/filesystems/porting.rst +++ b/Documentation/filesystems/porting.rst @@ -45,6 +45,12 @@ typically between calling iget_locked() and unlocking the inode. At some point that will become mandatory. +**mandatory** + +The foo_inode_info should always be allocated through alloc_inode_sb() rather +than kmem_cache_alloc() or kmalloc() related to set up the inode reclaim context
+correctly.
+ --- **mandatory** diff --git a/fs/inode.c b/fs/inode.c index 63324df6fa27..9d9b422504d1 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -259,7 +259,7 @@ static struct inode *alloc_inode(struct super_block *sb) if (ops->alloc_inode) inode = ops->alloc_inode(sb); else - inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); + inode = alloc_inode_sb(sb, inode_cachep, GFP_KERNEL); if (!inode) return NULL; diff --git a/include/linux/fs.h b/include/linux/fs.h index ca9445f6cf3d..58a73e59e4c0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -3114,6 +3115,16 @@ extern void free_inode_nonrcu(struct inode *inode); extern int should_remove_suid(struct dentry *); extern int file_remove_privs(struct file *); +/* + * This must be used for allocating filesystems specific inodes to set + * up the inode reclaim context correctly. + */ +static inline void * +alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp) +{ + return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp); +} + extern void __insert_inode_hash(struct inode *, unsigned long hashval); static inline void insert_inode_hash(struct inode *inode) { -- cgit v1.2.3-71-gd317 From 9bbdc0f324097f72b2354c2f8be4cdffd32679b6 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:41:12 -0700 Subject: xarray: use kmem_cache_alloc_lru to allocate xa_node

The workingset code adds the xa_node to the shadow_nodes list, so the allocation of the xa_node should be done by kmem_cache_alloc_lru(). Use xas_set_lru() to pass the list_lru into which we want to insert the xa_node, so that the xa_node reclaim context is set up correctly.

Link: https://lkml.kernel.org/r/20220228122126.37293-9-songmuchun@bytedance.com Signed-off-by: Muchun Song Acked-by: Johannes Weiner Acked-by: Roman Gushchin Cc: Alex Shi Cc: Anna Schumaker Cc: Chao Yu Cc: Dave Chinner Cc: Fam Zheng Cc: Jaegeuk Kim Cc: Kari Argillander Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Qi Zheng Cc: Shakeel Butt Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Vladimir Davydov Cc: Vlastimil Babka Cc: Wei Yang Cc: Xiongchun Duan Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 5 ++++- include/linux/xarray.h | 9 ++++++++- lib/xarray.c | 10 +++++----- mm/workingset.c | 2 +- 4 files changed, 18 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 1d38d9475c4d..3db431276d82 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -334,9 +334,12 @@ void workingset_activation(struct folio *folio); /* Only track the nodes of mappings with shadow entries */ void workingset_update_node(struct xa_node *node); +extern struct list_lru shadow_nodes; #define mapping_set_update(xas, mapping) do { \ - if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \ + if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \ xas_set_update(xas, workingset_update_node); \ + xas_set_lru(xas, &shadow_nodes); \ + } \ } while (0) /* linux/mm/page_alloc.c */ diff --git a/include/linux/xarray.h b/include/linux/xarray.h index d6d5da6ed735..bb52b786be1b 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1317,6 +1317,7 @@ struct xa_state { struct xa_node *xa_node; struct xa_node *xa_alloc; xa_update_node_t xa_update; + struct list_lru *xa_lru; }; /* @@ -1336,7 +1337,8 @@ struct xa_state { .xa_pad = 0, \ .xa_node = XAS_RESTART, \ .xa_alloc = NULL, \ - .xa_update = NULL \ + .xa_update = NULL, \ + .xa_lru = NULL, \
} /** @@ -1631,6 +1633,11 @@ static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update) xas->xa_update = update; } +static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru) +{ + xas->xa_lru = lru; +} + /** * xas_next_entry() - Advance iterator to next present entry. * @xas: XArray operation state. diff --git a/lib/xarray.c b/lib/xarray.c index 6f47f6375808..b95e92598b9c 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -302,7 +302,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp) } if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) gfp |= __GFP_ACCOUNT; - xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); + xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp); if (!xas->xa_alloc) return false; xas->xa_alloc->parent = NULL; @@ -334,10 +334,10 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp) gfp |= __GFP_ACCOUNT; if (gfpflags_allow_blocking(gfp)) { xas_unlock_type(xas, lock_type); - xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); + xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp); xas_lock_type(xas, lock_type); } else { - xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); + xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp); } if (!xas->xa_alloc) return false; @@ -371,7 +371,7 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift) if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) gfp |= __GFP_ACCOUNT; - node = kmem_cache_alloc(radix_tree_node_cachep, gfp); + node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp); if (!node) { xas_set_err(xas, -ENOMEM); return NULL; @@ -1014,7 +1014,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, void *sibling = NULL; struct xa_node *node; - node = kmem_cache_alloc(radix_tree_node_cachep, gfp); + node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp); if (!node) goto nomem; node->array = xas->xa; diff --git a/mm/workingset.c b/mm/workingset.c index 8c03afe1d67c..979c7130c266 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -429,7 +429,7 @@ out: * point where they would still be useful. */ -static struct list_lru shadow_nodes; +struct list_lru shadow_nodes; void workingset_update_node(struct xa_node *node) { -- cgit v1.2.3-71-gd317 From 5abc1e37afa0335c52608d640fd30910b2eeda21 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:41:19 -0700 Subject: mm: list_lru: allocate list_lru_one only when needed

On our server, we found a suspected memory leak: the kmalloc-32 slab cache consumes more than 6GB of memory, while all other kmem_caches consume less than 2GB. After an in-depth analysis, it turned out that list_lru_one allocations are what drive the kmalloc-32 memory consumption.

crash> p memcg_nr_cache_ids
memcg_nr_cache_ids = $2 = 24574

memcg_nr_cache_ids is very large, and the memory consumption of each list_lru can be calculated with the following formula:

num_numa_node * memcg_nr_cache_ids * 32 (kmalloc-32)

There are 4 NUMA nodes in our system, so each list_lru consumes ~3MB.

crash> list super_blocks | wc -l
952

Every mount registers 2 list_lrus, one for inodes and another for dentries. There are 952 super_blocks, so the total memory is 952 * 2 * 3 MB (~5.6GB). But the number of memory cgroups is less than 500. So I guess more than 12286 containers have been deployed on this machine (I do not know why there are so many containers; it may be a user's bug or the user really wants to do that).
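The arithmetic above can be cross-checked with a trivial standalone C sketch (illustrative only; every figure is taken from this report):

  #include <stdio.h>

  int main(void)
  {
          double nodes = 4;            /* NUMA nodes */
          double ids = 24574;          /* memcg_nr_cache_ids */
          double obj = 32;             /* kmalloc-32 object size */
          double mounts = 952;         /* super_blocks */
          double lrus_per_mount = 2;   /* one for inodes, one for dentries */

          double per_lru = nodes * ids * obj;
          double total = mounts * lrus_per_mount * per_lru;

          printf("per list_lru: %.1f MB\n", per_lru / (1 << 20));
          printf("total:        %.2f GB\n", total / (1UL << 30));
          return 0;
  }

This prints ~3.0 MB per list_lru and ~5.58 GB in total, matching the estimates above.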
And memcg_nr_cache_ids has not been reduced to a suitable value. This can waste a lot of memory. Now the infrastructure for dynamic list_lru_one allocation is ready, so remove statically allocated memory code to save memory. Link: https://lkml.kernel.org/r/20220228122126.37293-11-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alex Shi Cc: Anna Schumaker Cc: Chao Yu Cc: Dave Chinner Cc: Fam Zheng Cc: Jaegeuk Kim Cc: Johannes Weiner Cc: Kari Argillander Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Qi Zheng Cc: Roman Gushchin Cc: Shakeel Butt Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Vladimir Davydov Cc: Vlastimil Babka Cc: Wei Yang Cc: Xiongchun Duan Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/list_lru.h | 7 +-- mm/list_lru.c | 121 ++++++++++++++++++++++++++--------------------- mm/memcontrol.c | 6 ++- 3 files changed, 77 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index ab912c49334f..c36db6dc2a65 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -32,14 +32,15 @@ struct list_lru_one { }; struct list_lru_per_memcg { + struct rcu_head rcu; /* array of per cgroup per node lists, indexed by node id */ - struct list_lru_one node[0]; + struct list_lru_one node[]; }; struct list_lru_memcg { struct rcu_head rcu; /* array of per cgroup lists, indexed by memcg_cache_id */ - struct list_lru_per_memcg *mlru[]; + struct list_lru_per_memcg __rcu *mlru[]; }; struct list_lru_node { @@ -77,7 +78,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, gfp_t gfp); int memcg_update_all_list_lrus(int num_memcgs); -void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg); +void memcg_drain_all_list_lrus(struct mem_cgroup *src, struct mem_cgroup *dst); /** * list_lru_add: add an element to the lru list's tail diff --git a/mm/list_lru.c b/mm/list_lru.c index bffa80527723..fc938d8ff48f 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -60,8 +60,12 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) * from relocation (see memcg_update_list_lru). */ mlrus = rcu_dereference_check(lru->mlrus, lockdep_is_held(&nlru->lock)); - if (mlrus && idx >= 0) - return &mlrus->mlru[idx]->node[nid]; + if (mlrus && idx >= 0) { + struct list_lru_per_memcg *mlru; + + mlru = rcu_dereference_check(mlrus->mlru[idx], true); + return mlru ? &mlru->node[nid] : NULL; + } return &nlru->lru; } @@ -188,7 +192,7 @@ unsigned long list_lru_count_one(struct list_lru *lru, rcu_read_lock(); l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg)); - count = READ_ONCE(l->nr_items); + count = l ? 
READ_ONCE(l->nr_items) : 0; rcu_read_unlock(); if (unlikely(count < 0)) @@ -217,8 +221,11 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, struct list_head *item, *n; unsigned long isolated = 0; - l = list_lru_from_memcg_idx(lru, nid, memcg_idx); restart: + l = list_lru_from_memcg_idx(lru, nid, memcg_idx); + if (!l) + goto out; + list_for_each_safe(item, n, &l->list) { enum lru_status ret; @@ -262,6 +269,7 @@ restart: BUG(); } } +out: return isolated; } @@ -354,20 +362,25 @@ static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp) return mlru; } -static int memcg_init_list_lru_range(struct list_lru_memcg *mlrus, - int begin, int end) +static void memcg_list_lru_free(struct list_lru *lru, int src_idx) { - int i; + struct list_lru_memcg *mlrus; + struct list_lru_per_memcg *mlru; - for (i = begin; i < end; i++) { - mlrus->mlru[i] = memcg_init_list_lru_one(GFP_KERNEL); - if (!mlrus->mlru[i]) - goto fail; - } - return 0; -fail: - memcg_destroy_list_lru_range(mlrus, begin, i); - return -ENOMEM; + spin_lock_irq(&lru->lock); + mlrus = rcu_dereference_protected(lru->mlrus, true); + mlru = rcu_dereference_protected(mlrus->mlru[src_idx], true); + rcu_assign_pointer(mlrus->mlru[src_idx], NULL); + spin_unlock_irq(&lru->lock); + + /* + * The __list_lru_walk_one() can walk the list of this node. + * We need kvfree_rcu() here. And the walking of the list + * is under lru->node[nid]->lock, which can serve as a RCU + * read-side critical section. + */ + if (mlru) + kvfree_rcu(mlru, rcu); } static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) @@ -381,14 +394,10 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) spin_lock_init(&lru->lock); - mlrus = kvmalloc(struct_size(mlrus, mlru, size), GFP_KERNEL); + mlrus = kvzalloc(struct_size(mlrus, mlru, size), GFP_KERNEL); if (!mlrus) return -ENOMEM; - if (memcg_init_list_lru_range(mlrus, 0, size)) { - kvfree(mlrus); - return -ENOMEM; - } RCU_INIT_POINTER(lru->mlrus, mlrus); return 0; @@ -422,13 +431,9 @@ static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_siz if (!new) return -ENOMEM; - if (memcg_init_list_lru_range(new, old_size, new_size)) { - kvfree(new); - return -ENOMEM; - } - spin_lock_irq(&lru->lock); memcpy(&new->mlru, &old->mlru, flex_array_size(new, mlru, old_size)); + memset(&new->mlru[old_size], 0, flex_array_size(new, mlru, new_size - old_size)); rcu_assign_pointer(lru->mlrus, new); spin_unlock_irq(&lru->lock); @@ -436,20 +441,6 @@ static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_siz return 0; } -static void memcg_cancel_update_list_lru(struct list_lru *lru, - int old_size, int new_size) -{ - struct list_lru_memcg *mlrus; - - mlrus = rcu_dereference_protected(lru->mlrus, - lockdep_is_held(&list_lrus_mutex)); - /* - * Do not bother shrinking the array back to the old size, because we - * cannot handle allocation failures here. 
- */ - memcg_destroy_list_lru_range(mlrus, old_size, new_size); -} - int memcg_update_all_list_lrus(int new_size) { int ret = 0; @@ -460,15 +451,10 @@ int memcg_update_all_list_lrus(int new_size) list_for_each_entry(lru, &memcg_list_lrus, list) { ret = memcg_update_list_lru(lru, old_size, new_size); if (ret) - goto fail; + break; } -out: mutex_unlock(&list_lrus_mutex); return ret; -fail: - list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list) - memcg_cancel_update_list_lru(lru, old_size, new_size); - goto out; } static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, @@ -485,6 +471,8 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, spin_lock_irq(&nlru->lock); src = list_lru_from_memcg_idx(lru, nid, src_idx); + if (!src) + goto out; dst = list_lru_from_memcg_idx(lru, nid, dst_idx); list_splice_init(&src->list, &dst->list); @@ -494,7 +482,7 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); src->nr_items = 0; } - +out: spin_unlock_irq(&nlru->lock); } @@ -505,15 +493,41 @@ static void memcg_drain_list_lru(struct list_lru *lru, for_each_node(i) memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg); + + memcg_list_lru_free(lru, src_idx); } -void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg) +void memcg_drain_all_list_lrus(struct mem_cgroup *src, struct mem_cgroup *dst) { + struct cgroup_subsys_state *css; struct list_lru *lru; + int src_idx = src->kmemcg_id; + + /* + * Change kmemcg_id of this cgroup and all its descendants to the + * parent's id, and then move all entries from this cgroup's list_lrus + * to ones of the parent. + * + * After we have finished, all list_lrus corresponding to this cgroup + * are guaranteed to remain empty. So we can safely free this cgroup's + * list lrus in memcg_list_lru_free(). + * + * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc() + * from allocating list lrus for this cgroup after memcg_list_lru_free() + * call. 
+ */ + rcu_read_lock(); + css_for_each_descendant_pre(css, &src->css) { + struct mem_cgroup *memcg; + + memcg = mem_cgroup_from_css(css); + memcg->kmemcg_id = dst->kmemcg_id; + } + rcu_read_unlock(); mutex_lock(&list_lrus_mutex); list_for_each_entry(lru, &memcg_list_lrus, list) - memcg_drain_list_lru(lru, src_idx, dst_memcg); + memcg_drain_list_lru(lru, src_idx, dst); mutex_unlock(&list_lrus_mutex); } @@ -528,7 +542,7 @@ static bool memcg_list_lru_allocated(struct mem_cgroup *memcg, return true; rcu_read_lock(); - allocated = !!rcu_dereference(lru->mlrus)->mlru[idx]; + allocated = !!rcu_access_pointer(rcu_dereference(lru->mlrus)->mlru[idx]); rcu_read_unlock(); return allocated; @@ -576,11 +590,12 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, mlrus = rcu_dereference_protected(lru->mlrus, true); while (i--) { int index = table[i].memcg->kmemcg_id; + struct list_lru_per_memcg *mlru = table[i].mlru; - if (mlrus->mlru[index]) - kfree(table[i].mlru); + if (index < 0 || rcu_dereference_protected(mlrus->mlru[index], true)) + kfree(mlru); else - mlrus->mlru[index] = table[i].mlru; + rcu_assign_pointer(mlrus->mlru[index], mlru); } spin_unlock_irqrestore(&lru->lock, flags); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f08a0dc2ac36..69c09efc599d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3709,6 +3709,10 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) memcg_reparent_objcgs(memcg, parent); + /* + * memcg_drain_all_list_lrus() can change memcg->kmemcg_id. + * Cache it to local @kmemcg_id. + */ kmemcg_id = memcg->kmemcg_id; /* @@ -3717,7 +3721,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) * The ordering is imposed by list_lru_node->lock taken by * memcg_drain_all_list_lrus(). */ - memcg_drain_all_list_lrus(kmemcg_id, parent); + memcg_drain_all_list_lrus(memcg, parent); memcg_free_cache_id(kmemcg_id); } -- cgit v1.2.3-71-gd317 From 1f391eb270791359ee79031945dbe3afeaec6ce3 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:41:22 -0700 Subject: mm: list_lru: rename memcg_drain_all_list_lrus to memcg_reparent_list_lrus

The purpose of memcg_drain_all_list_lrus() is to reparent list_lrus; it is very similar to memcg_reparent_objcgs(). Rename it to memcg_reparent_list_lrus() so that the name is more consistent with memcg_reparent_objcgs().
Link: https://lkml.kernel.org/r/20220228122126.37293-12-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alex Shi Cc: Anna Schumaker Cc: Chao Yu Cc: Dave Chinner Cc: Fam Zheng Cc: Jaegeuk Kim Cc: Johannes Weiner Cc: Kari Argillander Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Qi Zheng Cc: Roman Gushchin Cc: Shakeel Butt Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Vladimir Davydov Cc: Vlastimil Babka Cc: Wei Yang Cc: Xiongchun Duan Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/list_lru.h | 2 +- mm/list_lru.c | 24 ++++++++++++------------ mm/memcontrol.c | 6 +++--- 3 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index c36db6dc2a65..4b00fd8cb373 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -78,7 +78,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, gfp_t gfp); int memcg_update_all_list_lrus(int num_memcgs); -void memcg_drain_all_list_lrus(struct mem_cgroup *src, struct mem_cgroup *dst); +void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent); /** * list_lru_add: add an element to the lru list's tail diff --git a/mm/list_lru.c b/mm/list_lru.c index fc938d8ff48f..488dacd1f8ff 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -457,8 +457,8 @@ int memcg_update_all_list_lrus(int new_size) return ret; } -static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, - int src_idx, struct mem_cgroup *dst_memcg) +static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid, + int src_idx, struct mem_cgroup *dst_memcg) { struct list_lru_node *nlru = &lru->node[nid]; int dst_idx = dst_memcg->kmemcg_id; @@ -486,22 +486,22 @@ out: spin_unlock_irq(&nlru->lock); } -static void memcg_drain_list_lru(struct list_lru *lru, - int src_idx, struct mem_cgroup *dst_memcg) +static void memcg_reparent_list_lru(struct list_lru *lru, + int src_idx, struct mem_cgroup *dst_memcg) { int i; for_each_node(i) - memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg); + memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg); memcg_list_lru_free(lru, src_idx); } -void memcg_drain_all_list_lrus(struct mem_cgroup *src, struct mem_cgroup *dst) +void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent) { struct cgroup_subsys_state *css; struct list_lru *lru; - int src_idx = src->kmemcg_id; + int src_idx = memcg->kmemcg_id; /* * Change kmemcg_id of this cgroup and all its descendants to the @@ -517,17 +517,17 @@ void memcg_drain_all_list_lrus(struct mem_cgroup *src, struct mem_cgroup *dst) * call. 
*/ rcu_read_lock(); - css_for_each_descendant_pre(css, &src->css) { - struct mem_cgroup *memcg; + css_for_each_descendant_pre(css, &memcg->css) { + struct mem_cgroup *child; - memcg = mem_cgroup_from_css(css); - memcg->kmemcg_id = dst->kmemcg_id; + child = mem_cgroup_from_css(css); + child->kmemcg_id = parent->kmemcg_id; } rcu_read_unlock(); mutex_lock(&list_lrus_mutex); list_for_each_entry(lru, &memcg_list_lrus, list) - memcg_drain_list_lru(lru, src_idx, dst); + memcg_reparent_list_lru(lru, src_idx, parent); mutex_unlock(&list_lrus_mutex); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 69c09efc599d..c36b0a0dbc19 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3710,7 +3710,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) memcg_reparent_objcgs(memcg, parent); /* - * memcg_drain_all_list_lrus() can change memcg->kmemcg_id. + * memcg_reparent_list_lrus() can change memcg->kmemcg_id. * Cache it to local @kmemcg_id. */ kmemcg_id = memcg->kmemcg_id; @@ -3719,9 +3719,9 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) * After we have finished memcg_reparent_objcgs(), all list_lrus * corresponding to this cgroup are guaranteed to remain empty. * The ordering is imposed by list_lru_node->lock taken by - * memcg_drain_all_list_lrus(). + * memcg_reparent_list_lrus(). */ - memcg_drain_all_list_lrus(memcg, parent); + memcg_reparent_list_lrus(memcg, parent); memcg_free_cache_id(kmemcg_id); } -- cgit v1.2.3-71-gd317 From bbca91cca9a902de2e9907370e9c1e0a3d1aab0f Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:41:25 -0700 Subject: mm: list_lru: replace linear array with xarray

If we run 10k containers in the system, the size of the list_lru_memcg->lrus can be ~96KB per list_lru. When we decrease the number of containers, the size of the array will not be shrunk; it is not scalable. The xarray is a good choice for this case: we can save a lot of memory when there are tens of thousands of containers in the system. Using an xarray also lets us remove the array-resizing logic, which simplifies the code.
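For reference, the patched lookup path condenses to the following minimal sketch (kernel C, not a standalone program; it paraphrases list_lru_from_memcg_idx() from the hunk below). The dense per-id pointer array is replaced by a sparse xa_load(), which returns NULL for memcg ids that never allocated their lists, so callers must now tolerate a missing entry:

  /* Sketch only: assumes the new 'xa' field in struct list_lru. */
  static struct list_lru_one *
  sketch_lookup(struct list_lru *lru, int nid, int idx)
  {
          struct list_lru_per_memcg *mlru;

          if (idx < 0)    /* root cgroup uses the global per-node list */
                  return &lru->node[nid].lru;

          /* Sparse lookup: no preallocated array, no resizing when the
           * number of memcg ids grows.
           */
          mlru = xa_load(&lru->xa, idx);
          return mlru ? &mlru->node[nid] : NULL;
  }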
[akpm@linux-foundation.org: remove unused local] Link: https://lkml.kernel.org/r/20220228122126.37293-13-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alex Shi Cc: Anna Schumaker Cc: Chao Yu Cc: Dave Chinner Cc: Fam Zheng Cc: Jaegeuk Kim Cc: Johannes Weiner Cc: Kari Argillander Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Qi Zheng Cc: Roman Gushchin Cc: Shakeel Butt Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Vladimir Davydov Cc: Vlastimil Babka Cc: Wei Yang Cc: Xiongchun Duan Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/list_lru.h | 13 +-- include/linux/memcontrol.h | 23 ----- mm/list_lru.c | 203 +++++++++++++++------------------------------ mm/memcontrol.c | 77 ++--------------- 4 files changed, 73 insertions(+), 243 deletions(-) (limited to 'include/linux') diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 4b00fd8cb373..572c263561ac 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -11,6 +11,7 @@ #include #include #include +#include struct mem_cgroup; @@ -37,12 +38,6 @@ struct list_lru_per_memcg { struct list_lru_one node[]; }; -struct list_lru_memcg { - struct rcu_head rcu; - /* array of per cgroup lists, indexed by memcg_cache_id */ - struct list_lru_per_memcg __rcu *mlru[]; -}; - struct list_lru_node { /* protects all lists on the node, including per cgroup */ spinlock_t lock; @@ -57,10 +52,7 @@ struct list_lru { struct list_head list; int shrinker_id; bool memcg_aware; - /* protects ->mlrus->mlru[i] */ - spinlock_t lock; - /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ - struct list_lru_memcg __rcu *mlrus; + struct xarray xa; #endif }; @@ -77,7 +69,6 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, gfp_t gfp); -int memcg_update_all_list_lrus(int num_memcgs); void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent); /** diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b636732f0c14..066b7a3b8a9e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1685,18 +1685,6 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); extern struct static_key_false memcg_kmem_enabled_key; -extern int memcg_nr_cache_ids; -void memcg_get_cache_ids(void); -void memcg_put_cache_ids(void); - -/* - * Helper macro to loop through all memcg-specific caches. Callers must still - * check if the cache is valid (it is either valid or NULL). 
- * the slab_mutex must be held when looping through those caches - */ -#define for_each_memcg_cache_index(_idx) \ - for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++) - static inline bool memcg_kmem_enabled(void) { return static_branch_likely(&memcg_kmem_enabled_key); @@ -1753,9 +1741,6 @@ static inline void __memcg_kmem_uncharge_page(struct page *page, int order) { } -#define for_each_memcg_cache_index(_idx) \ - for (; NULL; ) - static inline bool memcg_kmem_enabled(void) { return false; @@ -1766,14 +1751,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return -1; } -static inline void memcg_get_cache_ids(void) -{ -} - -static inline void memcg_put_cache_ids(void) -{ -} - static inline struct mem_cgroup *mem_cgroup_from_obj(void *p) { return NULL; diff --git a/mm/list_lru.c b/mm/list_lru.c index 488dacd1f8ff..3fdae1688f82 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -52,21 +52,12 @@ static int lru_shrinker_id(struct list_lru *lru) static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) { - struct list_lru_memcg *mlrus; - struct list_lru_node *nlru = &lru->node[nid]; - - /* - * Either lock or RCU protects the array of per cgroup lists - * from relocation (see memcg_update_list_lru). - */ - mlrus = rcu_dereference_check(lru->mlrus, lockdep_is_held(&nlru->lock)); - if (mlrus && idx >= 0) { - struct list_lru_per_memcg *mlru; + if (list_lru_memcg_aware(lru) && idx >= 0) { + struct list_lru_per_memcg *mlru = xa_load(&lru->xa, idx); - mlru = rcu_dereference_check(mlrus->mlru[idx], true); return mlru ? &mlru->node[nid] : NULL; } - return &nlru->lru; + return &lru->node[nid].lru; } static inline struct list_lru_one * @@ -77,7 +68,7 @@ list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr, struct list_lru_one *l = &nlru->lru; struct mem_cgroup *memcg = NULL; - if (!lru->mlrus) + if (!list_lru_memcg_aware(lru)) goto out; memcg = mem_cgroup_from_obj(ptr); @@ -309,16 +300,20 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid, unsigned long *nr_to_walk) { long isolated = 0; - int memcg_idx; isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg, nr_to_walk); + +#ifdef CONFIG_MEMCG_KMEM if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { - for_each_memcg_cache_index(memcg_idx) { + struct list_lru_per_memcg *mlru; + unsigned long index; + + xa_for_each(&lru->xa, index, mlru) { struct list_lru_node *nlru = &lru->node[nid]; spin_lock(&nlru->lock); - isolated += __list_lru_walk_one(lru, nid, memcg_idx, + isolated += __list_lru_walk_one(lru, nid, index, isolate, cb_arg, nr_to_walk); spin_unlock(&nlru->lock); @@ -327,6 +322,8 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid, break; } } +#endif + return isolated; } EXPORT_SYMBOL_GPL(list_lru_walk_node); @@ -338,15 +335,6 @@ static void init_one_lru(struct list_lru_one *l) } #ifdef CONFIG_MEMCG_KMEM -static void memcg_destroy_list_lru_range(struct list_lru_memcg *mlrus, - int begin, int end) -{ - int i; - - for (i = begin; i < end; i++) - kfree(mlrus->mlru[i]); -} - static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp) { int nid; @@ -364,14 +352,7 @@ static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp) static void memcg_list_lru_free(struct list_lru *lru, int src_idx) { - struct list_lru_memcg *mlrus; - struct list_lru_per_memcg *mlru; - - spin_lock_irq(&lru->lock); - mlrus = rcu_dereference_protected(lru->mlrus, true); - mlru = rcu_dereference_protected(mlrus->mlru[src_idx], true); - 
rcu_assign_pointer(mlrus->mlru[src_idx], NULL); - spin_unlock_irq(&lru->lock); + struct list_lru_per_memcg *mlru = xa_erase_irq(&lru->xa, src_idx); /* * The __list_lru_walk_one() can walk the list of this node. @@ -383,78 +364,27 @@ static void memcg_list_lru_free(struct list_lru *lru, int src_idx) kvfree_rcu(mlru, rcu); } -static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) +static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { - struct list_lru_memcg *mlrus; - int size = memcg_nr_cache_ids; - + if (memcg_aware) + xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ); lru->memcg_aware = memcg_aware; - if (!memcg_aware) - return 0; - - spin_lock_init(&lru->lock); - - mlrus = kvzalloc(struct_size(mlrus, mlru, size), GFP_KERNEL); - if (!mlrus) - return -ENOMEM; - - RCU_INIT_POINTER(lru->mlrus, mlrus); - - return 0; } static void memcg_destroy_list_lru(struct list_lru *lru) { - struct list_lru_memcg *mlrus; + XA_STATE(xas, &lru->xa, 0); + struct list_lru_per_memcg *mlru; if (!list_lru_memcg_aware(lru)) return; - /* - * This is called when shrinker has already been unregistered, - * and nobody can use it. So, there is no need to use kvfree_rcu(). - */ - mlrus = rcu_dereference_protected(lru->mlrus, true); - memcg_destroy_list_lru_range(mlrus, 0, memcg_nr_cache_ids); - kvfree(mlrus); -} - -static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_size) -{ - struct list_lru_memcg *old, *new; - - BUG_ON(old_size > new_size); - - old = rcu_dereference_protected(lru->mlrus, - lockdep_is_held(&list_lrus_mutex)); - new = kvmalloc(struct_size(new, mlru, new_size), GFP_KERNEL); - if (!new) - return -ENOMEM; - - spin_lock_irq(&lru->lock); - memcpy(&new->mlru, &old->mlru, flex_array_size(new, mlru, old_size)); - memset(&new->mlru[old_size], 0, flex_array_size(new, mlru, new_size - old_size)); - rcu_assign_pointer(lru->mlrus, new); - spin_unlock_irq(&lru->lock); - - kvfree_rcu(old, rcu); - return 0; -} - -int memcg_update_all_list_lrus(int new_size) -{ - int ret = 0; - struct list_lru *lru; - int old_size = memcg_nr_cache_ids; - - mutex_lock(&list_lrus_mutex); - list_for_each_entry(lru, &memcg_list_lrus, list) { - ret = memcg_update_list_lru(lru, old_size, new_size); - if (ret) - break; + xas_lock_irq(&xas); + xas_for_each(&xas, mlru, ULONG_MAX) { + kfree(mlru); + xas_store(&xas, NULL); } - mutex_unlock(&list_lrus_mutex); - return ret; + xas_unlock_irq(&xas); } static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid, @@ -521,7 +451,7 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren struct mem_cgroup *child; child = mem_cgroup_from_css(css); - child->kmemcg_id = parent->kmemcg_id; + WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id); } rcu_read_unlock(); @@ -531,21 +461,12 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren mutex_unlock(&list_lrus_mutex); } -static bool memcg_list_lru_allocated(struct mem_cgroup *memcg, - struct list_lru *lru) +static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg, + struct list_lru *lru) { - bool allocated; - int idx; - - idx = memcg->kmemcg_id; - if (unlikely(idx < 0)) - return true; + int idx = memcg->kmemcg_id; - rcu_read_lock(); - allocated = !!rcu_access_pointer(rcu_dereference(lru->mlrus)->mlru[idx]); - rcu_read_unlock(); - - return allocated; + return idx < 0 || xa_load(&lru->xa, idx); } int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, @@ -553,11 +474,11 @@ int memcg_list_lru_alloc(struct 
mem_cgroup *memcg, struct list_lru *lru, { int i; unsigned long flags; - struct list_lru_memcg *mlrus; struct list_lru_memcg_table { struct list_lru_per_memcg *mlru; struct mem_cgroup *memcg; } *table; + XA_STATE(xas, &lru->xa, 0); if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru)) return 0; @@ -586,27 +507,48 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, } } - spin_lock_irqsave(&lru->lock, flags); - mlrus = rcu_dereference_protected(lru->mlrus, true); + xas_lock_irqsave(&xas, flags); while (i--) { - int index = table[i].memcg->kmemcg_id; + int index = READ_ONCE(table[i].memcg->kmemcg_id); struct list_lru_per_memcg *mlru = table[i].mlru; - if (index < 0 || rcu_dereference_protected(mlrus->mlru[index], true)) + xas_set(&xas, index); +retry: + if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) { kfree(mlru); - else - rcu_assign_pointer(mlrus->mlru[index], mlru); + } else { + xas_store(&xas, mlru); + if (xas_error(&xas) == -ENOMEM) { + xas_unlock_irqrestore(&xas, flags); + if (xas_nomem(&xas, gfp)) + xas_set_err(&xas, 0); + xas_lock_irqsave(&xas, flags); + /* + * The xas lock has been released, this memcg + * can be reparented before us. So reload + * memcg id. More details see the comments + * in memcg_reparent_list_lrus(). + */ + index = READ_ONCE(table[i].memcg->kmemcg_id); + if (index < 0) + xas_set_err(&xas, 0); + else if (!xas_error(&xas) && index != xas.xa_index) + xas_set(&xas, index); + goto retry; + } + } } - spin_unlock_irqrestore(&lru->lock, flags); - + /* xas_nomem() is used to free memory instead of memory allocation. */ + if (xas.xa_alloc) + xas_nomem(&xas, gfp); + xas_unlock_irqrestore(&xas, flags); kfree(table); - return 0; + return xas_error(&xas); } #else -static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) +static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { - return 0; } static void memcg_destroy_list_lru(struct list_lru *lru) @@ -618,7 +560,6 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct lock_class_key *key, struct shrinker *shrinker) { int i; - int err = -ENOMEM; #ifdef CONFIG_MEMCG_KMEM if (shrinker) @@ -626,11 +567,10 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, else lru->shrinker_id = -1; #endif - memcg_get_cache_ids(); lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL); if (!lru->node) - goto out; + return -ENOMEM; for_each_node(i) { spin_lock_init(&lru->node[i].lock); @@ -639,18 +579,10 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, init_one_lru(&lru->node[i].lru); } - err = memcg_init_list_lru(lru, memcg_aware); - if (err) { - kfree(lru->node); - /* Do this so a list_lru_destroy() doesn't crash: */ - lru->node = NULL; - goto out; - } - + memcg_init_list_lru(lru, memcg_aware); list_lru_register(lru); -out: - memcg_put_cache_ids(); - return err; + + return 0; } EXPORT_SYMBOL_GPL(__list_lru_init); @@ -660,8 +592,6 @@ void list_lru_destroy(struct list_lru *lru) if (!lru->node) return; - memcg_get_cache_ids(); - list_lru_unregister(lru); memcg_destroy_list_lru(lru); @@ -671,6 +601,5 @@ void list_lru_destroy(struct list_lru *lru) #ifdef CONFIG_MEMCG_KMEM lru->shrinker_id = -1; #endif - memcg_put_cache_ids(); } EXPORT_SYMBOL_GPL(list_lru_destroy); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c36b0a0dbc19..68eb62d10c48 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -351,42 +351,17 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg, * This will be used as a shrinker 
list's index. * The main reason for not using cgroup id for this: * this works better in sparse environments, where we have a lot of memcgs, - * but only a few kmem-limited. Or also, if we have, for instance, 200 - * memcgs, and none but the 200th is kmem-limited, we'd have to have a - * 200 entry array for that. - * - * The current size of the caches array is stored in memcg_nr_cache_ids. It - * will double each time we have to increase it. + * but only a few kmem-limited. */ static DEFINE_IDA(memcg_cache_ida); -int memcg_nr_cache_ids; - -/* Protects memcg_nr_cache_ids */ -static DECLARE_RWSEM(memcg_cache_ids_sem); - -void memcg_get_cache_ids(void) -{ - down_read(&memcg_cache_ids_sem); -} - -void memcg_put_cache_ids(void) -{ - up_read(&memcg_cache_ids_sem); -} /* - * MIN_SIZE is different than 1, because we would like to avoid going through - * the alloc/free process all the time. In a small machine, 4 kmem-limited - * cgroups is a reasonable guess. In the future, it could be a parameter or - * tunable, but that is strictly not necessary. - * * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get * this constant directly from cgroup, but it is understandable that this is * better kept as an internal representation in cgroup.c. In any case, the * cgrp_id space is not getting any smaller, and we don't have to necessarily * increase ours as well if it increases. */ -#define MEMCG_CACHES_MIN_SIZE 4 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX /* @@ -2944,49 +2919,6 @@ __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) return objcg; } -static int memcg_alloc_cache_id(void) -{ - int id, size; - int err; - - id = ida_simple_get(&memcg_cache_ida, - 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); - if (id < 0) - return id; - - if (id < memcg_nr_cache_ids) - return id; - - /* - * There's no space for the new id in memcg_caches arrays, - * so we have to grow them. 
- */ - down_write(&memcg_cache_ids_sem); - - size = 2 * (id + 1); - if (size < MEMCG_CACHES_MIN_SIZE) - size = MEMCG_CACHES_MIN_SIZE; - else if (size > MEMCG_CACHES_MAX_SIZE) - size = MEMCG_CACHES_MAX_SIZE; - - err = memcg_update_all_list_lrus(size); - if (!err) - memcg_nr_cache_ids = size; - - up_write(&memcg_cache_ids_sem); - - if (err) { - ida_simple_remove(&memcg_cache_ida, id); - return err; - } - return id; -} - -static void memcg_free_cache_id(int id) -{ - ida_simple_remove(&memcg_cache_ida, id); -} - static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages) { mod_memcg_state(memcg, MEMCG_KMEM, nr_pages); @@ -3673,13 +3605,14 @@ static int memcg_online_kmem(struct mem_cgroup *memcg) if (unlikely(mem_cgroup_is_root(memcg))) return 0; - memcg_id = memcg_alloc_cache_id(); + memcg_id = ida_alloc_max(&memcg_cache_ida, MEMCG_CACHES_MAX_SIZE - 1, + GFP_KERNEL); if (memcg_id < 0) return memcg_id; objcg = obj_cgroup_alloc(); if (!objcg) { - memcg_free_cache_id(memcg_id); + ida_free(&memcg_cache_ida, memcg_id); return -ENOMEM; } objcg->memcg = memcg; @@ -3723,7 +3656,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) */ memcg_reparent_list_lrus(memcg, parent); - memcg_free_cache_id(kmemcg_id); + ida_free(&memcg_cache_ida, kmemcg_id); } #else static int memcg_online_kmem(struct mem_cgroup *memcg) -- cgit v1.2.3-71-gd317 From d70110704d2d52a65a9fe43be409d8c3acce79fe Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:41:35 -0700 Subject: mm: list_lru: rename list_lru_per_memcg to list_lru_memcg The name of list_lru_memcg was occupied before and became free since last commit. Rename list_lru_per_memcg to list_lru_memcg since the name is brief. Link: https://lkml.kernel.org/r/20220228122126.37293-16-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alex Shi Cc: Anna Schumaker Cc: Chao Yu Cc: Dave Chinner Cc: Fam Zheng Cc: Jaegeuk Kim Cc: Johannes Weiner Cc: Kari Argillander Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Qi Zheng Cc: Roman Gushchin Cc: Shakeel Butt Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Vladimir Davydov Cc: Vlastimil Babka Cc: Wei Yang Cc: Xiongchun Duan Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/list_lru.h | 2 +- mm/list_lru.c | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 572c263561ac..b35968ee9fb5 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -32,7 +32,7 @@ struct list_lru_one { long nr_items; }; -struct list_lru_per_memcg { +struct list_lru_memcg { struct rcu_head rcu; /* array of per cgroup per node lists, indexed by node id */ struct list_lru_one node[]; diff --git a/mm/list_lru.c b/mm/list_lru.c index 3fdae1688f82..1e740febc96e 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -53,7 +53,7 @@ static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) { if (list_lru_memcg_aware(lru) && idx >= 0) { - struct list_lru_per_memcg *mlru = xa_load(&lru->xa, idx); + struct list_lru_memcg *mlru = xa_load(&lru->xa, idx); return mlru ? 
&mlru->node[nid] : NULL; } @@ -306,7 +306,7 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid, #ifdef CONFIG_MEMCG_KMEM if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { - struct list_lru_per_memcg *mlru; + struct list_lru_memcg *mlru; unsigned long index; xa_for_each(&lru->xa, index, mlru) { @@ -335,10 +335,10 @@ static void init_one_lru(struct list_lru_one *l) } #ifdef CONFIG_MEMCG_KMEM -static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp) +static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp) { int nid; - struct list_lru_per_memcg *mlru; + struct list_lru_memcg *mlru; mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp); if (!mlru) @@ -352,7 +352,7 @@ static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp) static void memcg_list_lru_free(struct list_lru *lru, int src_idx) { - struct list_lru_per_memcg *mlru = xa_erase_irq(&lru->xa, src_idx); + struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx); /* * The __list_lru_walk_one() can walk the list of this node. @@ -374,7 +374,7 @@ static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) static void memcg_destroy_list_lru(struct list_lru *lru) { XA_STATE(xas, &lru->xa, 0); - struct list_lru_per_memcg *mlru; + struct list_lru_memcg *mlru; if (!list_lru_memcg_aware(lru)) return; @@ -475,7 +475,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, int i; unsigned long flags; struct list_lru_memcg_table { - struct list_lru_per_memcg *mlru; + struct list_lru_memcg *mlru; struct mem_cgroup *memcg; } *table; XA_STATE(xas, &lru->xa, 0); @@ -491,7 +491,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, /* * Because the list_lru can be reparented to the parent cgroup's * list_lru, we should make sure that this cgroup and all its - * ancestors have allocated list_lru_per_memcg. + * ancestors have allocated list_lru_memcg. */ for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) { if (memcg_list_lru_allocated(memcg, lru)) @@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, int index = READ_ONCE(table[i].memcg->kmemcg_id); - struct list_lru_per_memcg *mlru = table[i].mlru; + struct list_lru_memcg *mlru = table[i].mlru; xas_set(&xas, index); retry: -- cgit v1.2.3-71-gd317 From 7c52f65de40ff0e44f821559eb61931d368a4c48 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:41:38 -0700 Subject: mm: memcontrol: rename memcg_cache_id to memcg_kmem_id The memcg_cache_id() introduced by commit 2633d7a02823 ("slab/slub: consider a memcg parameter in kmem_create_cache") was used to index into the kmem_cache->memcg_params->memcg_caches array. Since kmem_cache->memcg_params.memcg_caches has been removed by commit 9855609bde03 ("mm: memcg/slab: use a single set of kmem_caches for all accounted allocations"), the name no longer needs to reflect anything cache related. Rename it to memcg_kmem_id, which matches its remaining kmem-related use.
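To make the rename concrete, here is a minimal sketch of the lookup this id feeds (condensed from the list_lru hunks in this series; the helper name lru_for_memcg is illustrative, the real function is list_lru_from_memcg_idx()):

	/* Sketch only: condensed from mm/list_lru.c as of this series. */
	static inline struct list_lru_one *
	lru_for_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg)
	{
		int idx = memcg_kmem_id(memcg);	/* -1 when memcg has no kmem id */

		if (list_lru_memcg_aware(lru) && idx >= 0) {
			struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

			/* NULL means this memcg has not allocated its lists yet */
			return mlru ? &mlru->node[nid] : NULL;
		}
		return &lru->node[nid].lru;	/* global, non-memcg-aware list */
	}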
Link: https://lkml.kernel.org/r/20220228122126.37293-17-songmuchun@bytedance.com Signed-off-by: Muchun Song Cc: Alex Shi Cc: Anna Schumaker Cc: Chao Yu Cc: Dave Chinner Cc: Fam Zheng Cc: Jaegeuk Kim Cc: Johannes Weiner Cc: Kari Argillander Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Qi Zheng Cc: Roman Gushchin Cc: Shakeel Butt Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Vladimir Davydov Cc: Vlastimil Babka Cc: Wei Yang Cc: Xiongchun Duan Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 4 ++-- mm/list_lru.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 066b7a3b8a9e..a68dce3873fc 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1708,7 +1708,7 @@ static inline void memcg_kmem_uncharge_page(struct page *page, int order) * A helper for accessing memcg's kmem_id, used for getting * corresponding LRU lists. */ -static inline int memcg_cache_id(struct mem_cgroup *memcg) +static inline int memcg_kmem_id(struct mem_cgroup *memcg) { return memcg ? memcg->kmemcg_id : -1; } @@ -1746,7 +1746,7 @@ static inline bool memcg_kmem_enabled(void) return false; } -static inline int memcg_cache_id(struct mem_cgroup *memcg) +static inline int memcg_kmem_id(struct mem_cgroup *memcg) { return -1; } diff --git a/mm/list_lru.c b/mm/list_lru.c index 1e740febc96e..ba76428ceece 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -75,7 +75,7 @@ list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr, if (!memcg) goto out; - l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg)); + l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); out: if (memcg_ptr) *memcg_ptr = memcg; @@ -182,7 +182,7 @@ unsigned long list_lru_count_one(struct list_lru *lru, long count; rcu_read_lock(); - l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg)); + l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); count = l ? READ_ONCE(l->nr_items) : 0; rcu_read_unlock(); @@ -273,7 +273,7 @@ list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, unsigned long ret; spin_lock(&nlru->lock); - ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate, + ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate, cb_arg, nr_to_walk); spin_unlock(&nlru->lock); return ret; @@ -289,7 +289,7 @@ list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg, unsigned long ret; spin_lock_irq(&nlru->lock); - ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate, + ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate, cb_arg, nr_to_walk); spin_unlock_irq(&nlru->lock); return ret; -- cgit v1.2.3-71-gd317 From 16785bd7743104d57257a455001172b75afa7614 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 22 Mar 2022 14:41:47 -0700 Subject: mm: merge pte_mkhuge() call into arch_make_huge_pte() Each call into pte_mkhuge() is invariably followed by arch_make_huge_pte(). Instead, arch_make_huge_pte() can fold pte_mkhuge() in at the beginning. This updates the generic fallback stub for arch_make_huge_pte() and the available platform definitions. This makes huge pte creation much cleaner and easier to follow.
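In before/after terms the change is small; a condensed sketch, taken from the hunks below rather than new code:

	/* Before: every caller paired the two helpers by hand, e.g.: */
	pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
	pte = arch_make_huge_pte(pte, shift, vma->vm_flags);

	/* After: pte_mkhuge() moves into arch_make_huge_pte() itself; the
	 * generic fallback in include/linux/hugetlb.h becomes: */
	static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
					       vm_flags_t flags)
	{
		return pte_mkhuge(entry);
	}

	/* ...so the call site above shrinks to: */
	pte = huge_pte_modify(old_pte, newprot);
	pte = arch_make_huge_pte(pte, shift, vma->vm_flags);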
Link: https://lkml.kernel.org/r/1643860669-26307-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Christophe Leroy Acked-by: Mike Kravetz Acked-by: Catalin Marinas Cc: Will Deacon Cc: Michael Ellerman Cc: Paul Mackerras Cc: "David S. Miller" Cc: Mike Kravetz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/hugetlbpage.c | 1 + arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h | 4 ++-- arch/sparc/mm/hugetlbpage.c | 1 + include/linux/hugetlb.h | 2 +- mm/hugetlb.c | 3 +-- mm/vmalloc.c | 1 - 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index ffb9c229610a..228226c5fa80 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -347,6 +347,7 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) { size_t pagesize = 1UL << shift; + entry = pte_mkhuge(entry); if (pagesize == CONT_PTE_SIZE) { entry = pte_mkcont(entry); } else if (pagesize == CONT_PMD_SIZE) { diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h index 64b6c608eca4..de092b04ee1a 100644 --- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h @@ -71,9 +71,9 @@ static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags size_t size = 1UL << shift; if (size == SZ_16K) - return __pte(pte_val(entry) & ~_PAGE_HUGE); + return __pte(pte_val(entry) | _PAGE_SPS); else - return entry; + return __pte(pte_val(entry) | _PAGE_SPS | _PAGE_HUGE); } #define arch_make_huge_pte arch_make_huge_pte #endif diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 0f49fada2093..d8e0e3c7038d 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -181,6 +181,7 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) { pte_t pte; + entry = pte_mkhuge(entry); pte = hugepage_shift_to_tte(entry, shift); #ifdef CONFIG_SPARC64 diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d1897a69c540..52c462390aee 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -754,7 +754,7 @@ static inline void arch_clear_hugepage_flags(struct page *page) { } static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) { - return entry; + return pte_mkhuge(entry); } #endif diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f294db835f4b..a404af0b49a0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4637,7 +4637,6 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, vma->vm_page_prot)); } entry = pte_mkyoung(entry); - entry = pte_mkhuge(entry); entry = arch_make_huge_pte(entry, shift, vma->vm_flags); return entry; @@ -6171,7 +6170,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned int shift = huge_page_shift(hstate_vma(vma)); old_pte = huge_ptep_modify_prot_start(vma, address, ptep); - pte = pte_mkhuge(huge_pte_modify(old_pte, newprot)); + pte = huge_pte_modify(old_pte, newprot); pte = arch_make_huge_pte(pte, shift, vma->vm_flags); huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); pages++; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 4165304d3547..d0b14dd73adc 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -118,7 +118,6 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, if (size != PAGE_SIZE) { pte_t entry = pfn_pte(pfn, prot); 
- entry = pte_mkhuge(entry); entry = arch_make_huge_pte(entry, ilog2(size), 0); set_huge_pte_at(&init_mm, addr, pte, entry); pfn += PFN_DOWN(size); -- cgit v1.2.3-71-gd317 From ff11a7ce1f0f8c1e7870de26860024b4ddbf5755 Mon Sep 17 00:00:00 2001 From: Bang Li Date: Tue, 22 Mar 2022 14:43:02 -0700 Subject: mm/vmalloc: fix comments about vmap_area struct The vmap_area_root should be in the "busy" tree and the free_vmap_area_root should be in the "free" tree. Link: https://lkml.kernel.org/r/20220305011510.33596-1-libang.linuxer@gmail.com Fixes: 688fcbfc06e4 ("mm/vmalloc: modify struct vmap_area to reduce its size") Signed-off-by: Bang Li Reviewed-by: Uladzislau Rezki (Sony) Cc: Pengfei Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmalloc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 880227b9f044..05065915edd7 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -80,8 +80,8 @@ struct vmap_area { /* * The following two variables can be packed, because * a vmap_area object can be either: - * 1) in "free" tree (root is vmap_area_root) - * 2) or "busy" tree (root is free_vmap_area_root) + * 1) in "free" tree (root is free_vmap_area_root) + * 2) or "busy" tree (root is vmap_area_root) */ union { unsigned long subtree_max_size; /* in "free" tree */ -- cgit v1.2.3-71-gd317 From 1dd214b8f21ca46d5431be9b2db8513c59e07a26 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Tue, 22 Mar 2022 14:43:05 -0700 Subject: mm: page_alloc: avoid merging non-fallbackable pageblocks with others This is done in addition to MIGRATE_ISOLATE pageblock merge avoidance. It prepares for the upcoming removal of the MAX_ORDER-1 alignment requirement for CMA and alloc_contig_range(). MIGRATE_HIGHATOMIC should not merge with other migratetypes like MIGRATE_ISOLATE and MIGRARTE_CMA[1], so this commit prevents that too. Remove MIGRATE_CMA and MIGRATE_ISOLATE from fallbacks list, since they are never used. [1] https://lore.kernel.org/linux-mm/20211130100853.GP3366@techsingularity.net/ Link: https://lkml.kernel.org/r/20220124175957.1261961-1-zi.yan@sent.com Signed-off-by: Zi Yan Acked-by: Mel Gorman Acked-by: David Hildenbrand Acked-by: Vlastimil Babka Acked-by: Mike Rapoport Reviewed-by: Oscar Salvador Cc: Mike Rapoport Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 11 +++++++++++ mm/page_alloc.c | 44 +++++++++++++++++++++----------------------- 2 files changed, 32 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index aed44e9b5d89..71b77aab748d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -83,6 +83,17 @@ static inline bool is_migrate_movable(int mt) return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; } +/* + * Check whether a migratetype can be merged with another migratetype. + * + * It is only mergeable when it can fall back to other migratetypes for + * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c. 
+ */ +static inline bool migratetype_is_mergeable(int mt) +{ + return mt < MIGRATE_PCPTYPES; +} + #define for_each_migratetype_order(order, type) \ for (order = 0; order < MAX_ORDER; order++) \ for (type = 0; type < MIGRATE_TYPES; type++) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3589febc6d31..82f8226b1180 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1117,25 +1117,24 @@ continue_merging: } if (order < MAX_ORDER - 1) { /* If we are here, it means order is >= pageblock_order. - * We want to prevent merge between freepages on isolate - * pageblock and normal pageblock. Without this, pageblock - * isolation could cause incorrect freepage or CMA accounting. + * We want to prevent merge between freepages on pageblock + * without fallbacks and normal pageblock. Without this, + * pageblock isolation could cause incorrect freepage or CMA + * accounting or HIGHATOMIC accounting. * * We don't want to hit this code for the more frequent * low-order merging. */ - if (unlikely(has_isolate_pageblock(zone))) { - int buddy_mt; + int buddy_mt; - buddy_pfn = __find_buddy_pfn(pfn, order); - buddy = page + (buddy_pfn - pfn); - buddy_mt = get_pageblock_migratetype(buddy); + buddy_pfn = __find_buddy_pfn(pfn, order); + buddy = page + (buddy_pfn - pfn); + buddy_mt = get_pageblock_migratetype(buddy); - if (migratetype != buddy_mt - && (is_migrate_isolate(migratetype) || - is_migrate_isolate(buddy_mt))) - goto done_merging; - } + if (migratetype != buddy_mt + && (!migratetype_is_mergeable(migratetype) || + !migratetype_is_mergeable(buddy_mt))) + goto done_merging; max_order = order + 1; goto continue_merging; } @@ -2479,17 +2478,13 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, /* * This array describes the order lists are fallen back to when * the free lists for the desirable migrate type are depleted + * + * The other migratetypes do not have fallbacks. */ static int fallbacks[MIGRATE_TYPES][3] = { [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, -#ifdef CONFIG_CMA - [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */ -#endif -#ifdef CONFIG_MEMORY_ISOLATION - [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */ -#endif }; #ifdef CONFIG_CMA @@ -2795,8 +2790,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, /* Yoink! 
*/ mt = get_pageblock_migratetype(page); - if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt) - && !is_migrate_cma(mt)) { + /* Only reserve normal pageblocks (i.e., they can merge with others) */ + if (migratetype_is_mergeable(mt)) { zone->nr_reserved_highatomic += pageblock_nr_pages; set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); @@ -3545,8 +3540,11 @@ int __isolate_free_page(struct page *page, unsigned int order) struct page *endpage = page + (1 << order) - 1; for (; page < endpage; page += pageblock_nr_pages) { int mt = get_pageblock_migratetype(page); - if (!is_migrate_isolate(mt) && !is_migrate_cma(mt) - && !is_migrate_highatomic(mt)) + /* + * Only change normal pageblocks (i.e., they can merge + * with others) + */ + if (migratetype_is_mergeable(mt)) set_pageblock_migratetype(page, MIGRATE_MOVABLE); } -- cgit v1.2.3-71-gd317 From 7f37e49cbd60ef71b82d25cd55b039a65d06387c Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 22 Mar 2022 14:43:11 -0700 Subject: mm/mmzone.h: remove unused macros Remove pgdat_page_nr, nid_page_nr and NODE_MEM_MAP. They are unused now. Link: https://lkml.kernel.org/r/20220127093210.62293-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: David Hildenbrand Reviewed-by: Mike Rapoport Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 71b77aab748d..c9e6a50109b9 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -931,12 +931,6 @@ typedef struct pglist_data { #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) -#ifdef CONFIG_FLATMEM -#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) -#else -#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) -#endif -#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) @@ -1112,7 +1106,6 @@ static inline struct pglist_data *NODE_DATA(int nid) { return &contig_page_data; } -#define NODE_MEM_MAP(nid) mem_map #else /* CONFIG_NUMA */ -- cgit v1.2.3-71-gd317 From e16faf26780fc0c8dd693ea9ee8420a7706cb2f5 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 22 Mar 2022 14:43:17 -0700 Subject: cma: factor out minimum alignment requirement Patch series "mm: enforce pageblock_order < MAX_ORDER". Having pageblock_order >= MAX_ORDER can apparently happen in corner cases and some parts of the kernel are not prepared for it. For example, Aneesh has shown [1] that such kernels can be compiled on ppc64 with 64k base pages by setting FORCE_MAX_ZONEORDER=8, which will run into a WARN_ON_ONCE(order >= MAX_ORDER) in compaction code right during boot. We can get pageblock_order >= MAX_ORDER when the default hugetlb size is bigger than the maximum allocation granularity of the buddy, in which case we are no longer talking about huge pages but instead gigantic pages. Having pageblock_order >= MAX_ORDER can only make alloc_contig_range() of such gigantic pages more likely to succeed. Reliable use of gigantic pages either requires boot time allocation or CMA; there is no need to overcomplicate some places in the kernel to optimize for corner cases that are broken in other areas of the kernel.
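The first patch's consolidation amounts to replacing an open-coded maximum with one shared constant; roughly, as condensed from the include/linux/cma.h and kernel/dma/contiguous.c hunks below (illustrative only):

	/* Open-coded at several call sites before this series: */
	phys_addr_t align = PAGE_SIZE << max_t(unsigned long, MAX_ORDER - 1,
					       pageblock_order);

	/* One shared definition afterwards (include/linux/cma.h): */
	#define CMA_MIN_ALIGNMENT_PAGES max_t(phys_addr_t, MAX_ORDER_NR_PAGES, \
					      pageblock_nr_pages)
	#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)

	/* A typical caller check; base | size is aligned iff both are aligned: */
	if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;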
This patch (of 2): Let's enforce pageblock_order < MAX_ORDER and simplify. Especially patch #1 can be regarded a cleanup before: [PATCH v5 0/6] Use pageblock_order for cma and alloc_contig_range alignment. [2] [1] https://lkml.kernel.org/r/87r189a2ks.fsf@linux.ibm.com [2] https://lkml.kernel.org/r/20220211164135.1803616-1-zi.yan@sent.com Link: https://lkml.kernel.org/r/20220214174132.219303-2-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Zi Yan Acked-by: Rob Herring Cc: Aneesh Kumar K.V Cc: Michael Ellerman Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Frank Rowand Cc: Michael S. Tsirkin Cc: Christoph Hellwig Cc: Marek Szyprowski Cc: Robin Murphy Cc: Minchan Kim Cc: Vlastimil Babka Cc: John Garry via iommu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/fadump-internal.h | 5 ----- arch/powerpc/kernel/fadump.c | 2 +- drivers/of/of_reserved_mem.c | 9 +++------ include/linux/cma.h | 9 +++++++++ kernel/dma/contiguous.c | 4 +--- mm/cma.c | 20 +++++--------------- 6 files changed, 19 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h index 52189928ec08..81bcb9abb371 100644 --- a/arch/powerpc/include/asm/fadump-internal.h +++ b/arch/powerpc/include/asm/fadump-internal.h @@ -19,11 +19,6 @@ #define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt) -/* Alignment per CMA requirement. */ -#define FADUMP_CMA_ALIGNMENT (PAGE_SIZE << \ - max_t(unsigned long, MAX_ORDER - 1, \ - pageblock_order)) - /* FAD commands */ #define FADUMP_REGISTER 1 #define FADUMP_UNREGISTER 2 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index d03e488cfe9c..7eb67201ea41 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -544,7 +544,7 @@ int __init fadump_reserve_mem(void) if (!fw_dump.nocma) { fw_dump.boot_memory_size = ALIGN(fw_dump.boot_memory_size, - FADUMP_CMA_ALIGNMENT); + CMA_MIN_ALIGNMENT_BYTES); } #endif diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 9c0fb962c22b..75caa6f5d36f 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "of_private.h" @@ -116,12 +117,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node, if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool") && of_get_flat_dt_prop(node, "reusable", NULL) - && !nomap) { - unsigned long order = - max_t(unsigned long, MAX_ORDER - 1, pageblock_order); - - align = max(align, (phys_addr_t)PAGE_SIZE << order); - } + && !nomap) + align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES); prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); if (prop) { diff --git a/include/linux/cma.h b/include/linux/cma.h index bd801023504b..75fe188ec4a1 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -20,6 +20,15 @@ #define CMA_MAX_NAME 64 +/* + * TODO: once the buddy -- especially pageblock merging and alloc_contig_range() + * -- can deal with only some pageblocks of a higher-order page being + * MIGRATE_CMA, we can use pageblock_nr_pages. 
+ */ +#define CMA_MIN_ALIGNMENT_PAGES max_t(phys_addr_t, MAX_ORDER_NR_PAGES, \ + pageblock_nr_pages) +#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES) + struct cma; extern unsigned long totalcma_pages; diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index 3d63d91cba5c..6ea80ae42622 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -399,8 +399,6 @@ static const struct reserved_mem_ops rmem_cma_ops = { static int __init rmem_cma_setup(struct reserved_mem *rmem) { - phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); - phys_addr_t mask = align - 1; unsigned long node = rmem->fdt_node; bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); struct cma *cma; @@ -416,7 +414,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem) of_get_flat_dt_prop(node, "no-map", NULL)) return -EINVAL; - if ((rmem->base & mask) || (rmem->size & mask)) { + if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) { pr_err("Reserved memory: incorrect alignment of CMA region\n"); return -EINVAL; } diff --git a/mm/cma.c b/mm/cma.c index bc9ca8f3c487..5a2cd5851658 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -168,7 +168,6 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, struct cma **res_cma) { struct cma *cma; - phys_addr_t alignment; /* Sanity checks */ if (cma_area_count == ARRAY_SIZE(cma_areas)) { @@ -179,15 +178,12 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, if (!size || !memblock_is_region_reserved(base, size)) return -EINVAL; - /* ensure minimal alignment required by mm core */ - alignment = PAGE_SIZE << - max_t(unsigned long, MAX_ORDER - 1, pageblock_order); - /* alignment should be aligned with order_per_bit */ - if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit)) + if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit)) return -EINVAL; - if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size) + /* ensure minimal alignment required by mm core */ + if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES)) return -EINVAL; /* @@ -262,14 +258,8 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, if (alignment && !is_power_of_2(alignment)) return -EINVAL; - /* - * Sanitise input arguments. - * Pages both ends in CMA area could be merged into adjacent unmovable - * migratetype page by page allocator's buddy algorithm. In the case, - * you couldn't get a contiguous memory, which is not what we want. - */ - alignment = max(alignment, (phys_addr_t)PAGE_SIZE << - max_t(unsigned long, MAX_ORDER - 1, pageblock_order)); + /* Sanitise input arguments. */ + alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES); if (fixed && base & (alignment - 1)) { ret = -EINVAL; pr_err("Region at %pa must be aligned to %pa bytes\n", -- cgit v1.2.3-71-gd317 From b3d40a2b6d10c9d0424d2b398bf962fb6adad87e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 22 Mar 2022 14:43:20 -0700 Subject: mm: enforce pageblock_order < MAX_ORDER Some places in the kernel don't really expect pageblock_order >= MAX_ORDER, and it looks like this is only possible in corner cases: 1) CONFIG_DEFERRED_STRUCT_PAGE_INIT we'll end up freeing pageblock_order pages via __free_pages_core(), which cannot possibly work. 2) find_zone_movable_pfns_for_nodes() will roundup the ZONE_MOVABLE start PFN to MAX_ORDER_NR_PAGES. Consequently with a bigger pageblock_order, we could have a single pageblock partially managed by two zones. 
3) compaction code runs into __fragmentation_index() with order >= MAX_ORDER, when checking WARN_ON_ONCE(order >= MAX_ORDER). [1] 4) mm/page_reporting.c won't be reporting any pages with default page_reporting_order == pageblock_order, as we'll be skipping the reporting loop inside page_reporting_process_zone(). 5) __rmqueue_fallback() will never be able to steal with ALLOC_NOFRAGMENT. pageblock_order >= MAX_ORDER is weird either way: it's a pure optimization for making alloc_contig_range(), as used for allocation of gigantic pages, a little more reliable to succeed. However, if there is demand for somewhat reliable allocation of gigantic pages, affected setups should be using CMA or boottime allocations instead. So let's make sure that pageblock_order < MAX_ORDER and simplify. [1] https://lkml.kernel.org/r/87r189a2ks.fsf@linux.ibm.com Link: https://lkml.kernel.org/r/20220214174132.219303-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Zi Yan Cc: Aneesh Kumar K.V Cc: Benjamin Herrenschmidt Cc: Christoph Hellwig Cc: Frank Rowand Cc: John Garry via iommu Cc: Marek Szyprowski Cc: Michael Ellerman Cc: Michael S. Tsirkin Cc: Minchan Kim Cc: Paul Mackerras Cc: Rob Herring Cc: Robin Murphy Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/virtio/virtio_mem.c | 9 +++------ include/linux/cma.h | 3 +-- include/linux/pageblock-flags.h | 7 +++++-- mm/Kconfig | 3 +++ mm/page_alloc.c | 32 ++++++++------------------------ 5 files changed, 20 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 38becd8d578c..e7d6b679596d 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -2476,13 +2476,10 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm) VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD); /* - * We want subblocks to span at least MAX_ORDER_NR_PAGES and - * pageblock_nr_pages pages. This: - * - Is required for now for alloc_contig_range() to work reliably - - * it doesn't properly handle smaller granularity on ZONE_NORMAL. + * TODO: once alloc_contig_range() works reliably with pageblock + * granularity on ZONE_NORMAL, use pageblock_nr_pages instead. */ - sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES, - pageblock_nr_pages) * PAGE_SIZE; + sb_size = PAGE_SIZE * MAX_ORDER_NR_PAGES; sb_size = max_t(uint64_t, vm->device_block_size, sb_size); if (sb_size < memory_block_size_bytes() && !force_bbm) { diff --git a/include/linux/cma.h b/include/linux/cma.h index 75fe188ec4a1..b1ba94f1cc9c 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -25,8 +25,7 @@ * -- can deal with only some pageblocks of a higher-order page being * MIGRATE_CMA, we can use pageblock_nr_pages. */ -#define CMA_MIN_ALIGNMENT_PAGES max_t(phys_addr_t, MAX_ORDER_NR_PAGES, \ - pageblock_nr_pages) +#define CMA_MIN_ALIGNMENT_PAGES MAX_ORDER_NR_PAGES #define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES) struct cma; diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 973fd731a520..83c7248053a1 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -37,8 +37,11 @@ extern unsigned int pageblock_order; #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ -/* Huge pages are a constant size */ -#define pageblock_order HUGETLB_PAGE_ORDER +/* + * Huge pages are a constant size, but don't exceed the maximum allocation + * granularity.
+ */ +#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER - 1) #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ diff --git a/mm/Kconfig b/mm/Kconfig index 3326ee3903f3..4c91b92e7537 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -262,6 +262,9 @@ config HUGETLB_PAGE_SIZE_VARIABLE HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available on a platform. + Note that the pageblock_order cannot exceed MAX_ORDER - 1 and will be + clamped down to MAX_ORDER - 1. + config CONTIG_ALLOC def_bool (MEMORY_ISOLATION && COMPACTION) || CMA diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 06c683754334..a4efbdad2f2b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1072,14 +1072,12 @@ static inline void __free_one_page(struct page *page, int migratetype, fpi_t fpi_flags) { struct capture_control *capc = task_capc(zone); + unsigned int max_order = pageblock_order; unsigned long buddy_pfn; unsigned long combined_pfn; - unsigned int max_order; struct page *buddy; bool to_tail; - max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order); - VM_BUG_ON(!zone_is_initialized(zone)); VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); @@ -2259,19 +2257,8 @@ void __init init_cma_reserved_pageblock(struct page *page) } while (++p, --i); set_pageblock_migratetype(page, MIGRATE_CMA); - - if (pageblock_order >= MAX_ORDER) { - i = pageblock_nr_pages; - p = page; - do { - set_page_refcounted(p); - __free_pages(p, MAX_ORDER - 1); - p += MAX_ORDER_NR_PAGES; - } while (i -= MAX_ORDER_NR_PAGES); - } else { - set_page_refcounted(page); - __free_pages(page, pageblock_order); - } + set_page_refcounted(page); + __free_pages(page, pageblock_order); adjust_managed_page_count(page, pageblock_nr_pages); page_zone(page)->cma_pages += pageblock_nr_pages; @@ -7382,16 +7369,15 @@ static inline void setup_usemap(struct zone *zone) {} /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ void __init set_pageblock_order(void) { - unsigned int order; + unsigned int order = MAX_ORDER - 1; /* Check that pageblock_nr_pages has not already been setup */ if (pageblock_order) return; - if (HPAGE_SHIFT > PAGE_SHIFT) + /* Don't let pageblocks exceed the maximum allocation granularity. */ + if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) order = HUGETLB_PAGE_ORDER; - else - order = MAX_ORDER - 1; /* * Assume the largest contiguous order of interest is a huge page. @@ -8979,14 +8965,12 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, #ifdef CONFIG_CONTIG_ALLOC static unsigned long pfn_max_align_down(unsigned long pfn) { - return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES, - pageblock_nr_pages) - 1); + return ALIGN_DOWN(pfn, MAX_ORDER_NR_PAGES); } static unsigned long pfn_max_align_up(unsigned long pfn) { - return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, - pageblock_nr_pages)); + return ALIGN(pfn, MAX_ORDER_NR_PAGES); } #if defined(CONFIG_DYNAMIC_DEBUG) || \ -- cgit v1.2.3-71-gd317 From 1ca75fa7f19d694c58af681fa023295072b03120 Mon Sep 17 00:00:00 2001 From: Oscar Salvador Date: Tue, 22 Mar 2022 14:43:51 -0700 Subject: arch/x86/mm/numa: Do not initialize nodes twice On x86, prior to ("mm: handle uninitialized numa nodes gracecully"), NUMA nodes could be allocated at three different places. - numa_register_memblks - init_cpu_to_node - init_gi_nodes All these calls happen at setup_arch, and have the following order: setup_arch ... x86_numa_init numa_init numa_register_memblks ... 
init_cpu_to_node init_memory_less_node alloc_node_data free_area_init_memoryless_node init_gi_nodes init_memory_less_node alloc_node_data free_area_init_memoryless_node numa_register_memblks() is only interested in those nodes which have memory, so it skips over any memoryless node it finds. Later on, when we have read ACPI's SRAT table, we call init_cpu_to_node() and init_gi_nodes(), which initialize any memoryless nodes we might have that have either CPU or Initiator affinity, meaning we allocate a pg_data_t struct for them and mark them as ONLINE. So far so good, but the thing is that after ("mm: handle uninitialized numa nodes gracefully"), we allocate all possible NUMA nodes in free_area_init(), meaning we have a picture like the following: setup_arch x86_numa_init numa_init numa_register_memblks <-- allocate non-memoryless node x86_init.paging.pagetable_init ... free_area_init free_area_init_memoryless <-- allocate memoryless node init_cpu_to_node alloc_node_data <-- allocate memoryless node with CPU free_area_init_memoryless_node init_gi_nodes alloc_node_data <-- allocate memoryless node with Initiator free_area_init_memoryless_node free_area_init() already allocates all possible NUMA nodes, but init_cpu_to_node() and init_gi_nodes() are clueless about that, so they go ahead and allocate a new pg_data_t struct without checking anything, meaning we end up allocating twice. It should be made clear that this only happens in the case where a memoryless NUMA node happens to have a CPU/Initiator affinity. So get rid of init_memory_less_node() and just set the node online. Note that setting the node online is needed, otherwise we choke down the chain when bringup_nonboot_cpus() ends up calling __try_online_node()->register_one_node()->... and we blow up in bus_add_device(). As can be seen here: BUG: kernel NULL pointer dereference, address: 0000000000000060 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 P4D 0 Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC PTI CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.17.0-rc4-1-default+ #45 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.0.0-prebuilt.qemu-project.org 04/4 RIP: 0010:bus_add_device+0x5a/0x140 Code: 8b 74 24 20 48 89 df e8 84 96 ff ff 85 c0 89 c5 75 38 48 8b 53 50 48 85 d2 0f 84 bb 00 004 RSP: 0000:ffffc9000022bd10 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff888100987400 RCX: ffff8881003e4e19 RDX: ffff8881009a5e00 RSI: ffff888100987400 RDI: ffff888100987400 RBP: 0000000000000000 R08: ffff8881003e4e18 R09: ffff8881003e4c98 R10: 0000000000000000 R11: ffff888100402bc0 R12: ffffffff822ceba0 R13: 0000000000000000 R14: ffff888100987400 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff88853fc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000060 CR3: 000000000200a001 CR4: 00000000001706b0 Call Trace: device_add+0x4c0/0x910 __register_one_node+0x97/0x2d0 __try_online_node+0x85/0xc0 try_online_node+0x25/0x40 cpu_up+0x4f/0x100 bringup_nonboot_cpus+0x4f/0x60 smp_init+0x26/0x79 kernel_init_freeable+0x130/0x2f1 kernel_init+0x17/0x150 ret_from_fork+0x22/0x30 The reason is simple: by the time bringup_nonboot_cpus() gets called, we did not register the node_subsys bus yet, so we crash when bus_add_device() tries to dereference bus()->p. The following shows the order of the calls: kernel_init_freeable smp_init bringup_nonboot_cpus ...
bus_add_device() <- we did not register node_subsys yet do_basic_setup do_initcalls postcore_initcall(register_node_type); register_node_type subsys_system_register subsys_register bus_register <- register node_subsys bus Why setting the node online saves us then? Well, simply because __try_online_node() backs off when the node is online, meaning we do not end up calling register_one_node() in the first place. This is subtle, broken and deserves a deep analysis and thought about how to put this into shape, but for now let us have this easy fix for the leaking memory issue. [osalvador@suse.de: add comments] Link: https://lkml.kernel.org/r/20220221142649.3457-1-osalvador@suse.de Link: https://lkml.kernel.org/r/20220218224302.5282-2-osalvador@suse.de Fixes: da4490c958ad ("mm: handle uninitialized numa nodes gracefully") Signed-off-by: Oscar Salvador Acked-by: Michal Hocko Cc: David Hildenbrand Cc: Rafael Aquini Cc: Dave Hansen Cc: Wei Yang Cc: Dennis Zhou Cc: Alexey Makhalov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/numa.c | 33 ++++++++++++++++++++------------- include/linux/mm.h | 1 - mm/page_alloc.c | 2 +- 3 files changed, 21 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index c6b1213086d6..e8b061557887 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -738,17 +738,6 @@ void __init x86_numa_init(void) numa_init(dummy_numa_init); } -static void __init init_memory_less_node(int nid) -{ - /* Allocate and initialize node data. Memory-less node is now online.*/ - alloc_node_data(nid); - free_area_init_memoryless_node(nid); - - /* - * All zonelists will be built later in start_kernel() after per cpu - * areas are initialized. - */ -} /* * A node may exist which has one or more Generic Initiators but no CPUs and no @@ -766,9 +755,18 @@ void __init init_gi_nodes(void) { int nid; + /* + * Exclude this node from + * bringup_nonboot_cpus + * cpu_up + * __try_online_node + * register_one_node + * because node_subsys is not initialized yet. + * TODO remove dependency on node_online + */ for_each_node_state(nid, N_GENERIC_INITIATOR) if (!node_online(nid)) - init_memory_less_node(nid); + node_set_online(nid); } /* @@ -798,8 +796,17 @@ void __init init_cpu_to_node(void) if (node == NUMA_NO_NODE) continue; + /* + * Exclude this node from + * bringup_nonboot_cpus + * cpu_up + * __try_online_node + * register_one_node + * because node_subsys is not initialized yet. 
+ * TODO remove dependency on node_online + */ if (!node_online(node)) - init_memory_less_node(node); + node_set_online(node); numa_set_node(cpu, node); } diff --git a/include/linux/mm.h b/include/linux/mm.h index c02a8cc16e4f..d0978235775f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2449,7 +2449,6 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) } extern void __init pagecache_init(void); -extern void __init free_area_init_memoryless_node(int nid); extern void free_initmem(void); /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 279852eae9db..7d24522b3b54 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7626,7 +7626,7 @@ static void __init free_area_init_node(int nid) free_area_init_core(pgdat); } -void __init free_area_init_memoryless_node(int nid) +static void __init free_area_init_memoryless_node(int nid) { free_area_init_node(nid); } -- cgit v1.2.3-71-gd317 From 888af2701db79b9b27c7e37f9ede528a5ca53b76 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 22 Mar 2022 14:44:44 -0700 Subject: mm/memory-failure.c: fix race with changing page compound again Patch series "A few fixup patches for memory failure", v2. This series contains a few patches to fix the race with a page's compound structure changing, make non-LRU movable pages unhandlable, and so on. More details can be found in the respective changelogs. There is a race window where, after we get the compound_head, the hugetlb page could be freed to the buddy allocator, or even changed to another compound page, just before we try to get the hwpoison page. Think about the race window below:

CPU 1                                   CPU 2
memory_failure_hugetlb
struct page *head = compound_head(p);
                                        hugetlb page might be freed to
                                        buddy, or even changed to another
                                        compound page.
get_hwpoison_page -- page is not what we want now...

If this race happens, just bail out. Also, MF_MSG_DIFFERENT_PAGE_SIZE is introduced to record this event.
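The fix follows the usual lookup, lock, revalidate pattern; condensed from the memory_failure_hugetlb() hunk below, with surrounding code elided:

	struct page *head = compound_head(p);
	/* ... get_hwpoison_page() and locking of the head page ... */
	lock_page(head);

	/*
	 * Revalidate under the page lock: the page could have been freed to
	 * the buddy allocator and rebuilt into a different compound page in
	 * the meantime, so 'head' may be stale by now.
	 */
	if (!PageHuge(p) || compound_head(p) != head) {
		action_result(pfn, MF_MSG_DIFFERENT_PAGE_SIZE, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}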
[akpm@linux-foundation.org: s@/**@/*@, per Naoya Horiguchi] Link: https://lkml.kernel.org/r/20220312074613.4798-1-linmiaohe@huawei.com Link: https://lkml.kernel.org/r/20220312074613.4798-2-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Acked-by: Naoya Horiguchi Cc: Tony Luck Cc: Borislav Petkov Cc: Mike Kravetz Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + include/ras/ras_event.h | 1 + mm/memory-failure.c | 12 ++++++++++++ 3 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index d0978235775f..45a449e8c209 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3239,6 +3239,7 @@ enum mf_action_page_type { MF_MSG_BUDDY, MF_MSG_DAX, MF_MSG_UNSPLIT_THP, + MF_MSG_DIFFERENT_PAGE_SIZE, MF_MSG_UNKNOWN, }; diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h index d0337a41141c..1e694fd239b9 100644 --- a/include/ras/ras_event.h +++ b/include/ras/ras_event.h @@ -374,6 +374,7 @@ TRACE_EVENT(aer_event, EM ( MF_MSG_BUDDY, "free buddy page" ) \ EM ( MF_MSG_DAX, "dax page" ) \ EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \ + EM ( MF_MSG_DIFFERENT_PAGE_SIZE, "different page size" ) \ EMe ( MF_MSG_UNKNOWN, "unknown page" ) /* diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 086ae4eb3421..7ec855149393 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -732,6 +732,7 @@ static const char * const action_page_types[] = { [MF_MSG_BUDDY] = "free buddy page", [MF_MSG_DAX] = "dax page", [MF_MSG_UNSPLIT_THP] = "unsplit thp", + [MF_MSG_DIFFERENT_PAGE_SIZE] = "different page size", [MF_MSG_UNKNOWN] = "unknown page", }; @@ -1532,6 +1533,17 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags) } lock_page(head); + + /* + * The page could have changed compound pages due to race window. + * If this happens just bail out. + */ + if (!PageHuge(p) || compound_head(p) != head) { + action_result(pfn, MF_MSG_DIFFERENT_PAGE_SIZE, MF_IGNORED); + res = -EBUSY; + goto out; + } + page_flags = head->flags; if (hwpoison_filter(p)) { -- cgit v1.2.3-71-gd317 From 1e7a8181640a620300e98e22223ca3445b349840 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 22 Mar 2022 14:44:53 -0700 Subject: mm, fault-injection: declare should_fail_alloc_page() The mm/ directory can almost fully be built with W=1, which would help in local development. One remaining issue is missing prototype for should_fail_alloc_page(). Thus add it next to the should_failslab() prototype. 
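For context, the warning at stake is -Wmissing-prototypes: the definition in mm/page_alloc.c must stay non-static (see the revert note below), so a shared declaration is the only clean fix. A rough sketch of the resulting pair follows; the page_alloc.c side is paraphrased from memory and is not part of this diff:

	/* include/linux/fault-inject.h, as added by this patch: */
	bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);

	/* mm/page_alloc.c, roughly; kept non-static so BTF/error injection
	 * can still resolve the symbol: */
	noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
	{
		return __should_fail_alloc_page(gfp_mask, order);
	}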
Note the previous attempt by commit f7173090033c ("mm/page_alloc: make should_fail_alloc_page() static") had to be reverted by commit 54aa386661fe as it caused an unresolved symbol error with CONFIG_DEBUG_INFO_BTF=y Link: https://lkml.kernel.org/r/20220314165724.16071-1-vbabka@suse.cz Signed-off-by: Vlastimil Babka Cc: Mel Gorman Cc: Matthew Wilcox Cc: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fault-inject.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index e525f6957c49..2d04f6448cde 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -64,6 +64,8 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, struct kmem_cache; +bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); + int should_failslab(struct kmem_cache *s, gfp_t gfpflags); #ifdef CONFIG_FAILSLAB extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags); -- cgit v1.2.3-71-gd317 From e7d324850bfcb30df563d144c0363cc44595277d Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 22 Mar 2022 14:45:00 -0700 Subject: mm: hugetlb: free the 2nd vmemmap page associated with each HugeTLB page Patch series "Free the 2nd vmemmap page associated with each HugeTLB page", v7. This series can minimize the overhead of struct page for 2MB HugeTLB pages significantly. It further reduces the overhead of struct page by 12.5% for a 2MB HugeTLB compared to the previous approach, which means 2GB per 1TB HugeTLB. It is a nice gain. Comments and reviews are welcome. Thanks. The main implementation and details can refer to the commit log of patch 1. In this series, I have changed the following four helpers, the following table shows the impact of the overhead of those helpers. +------------------+-----------------------+ | APIs | head page | tail page | +------------------+-----------+-----------+ | PageHead() | Y | N | +------------------+-----------+-----------+ | PageTail() | Y | N | +------------------+-----------+-----------+ | PageCompound() | N | N | +------------------+-----------+-----------+ | compound_head() | Y | N | +------------------+-----------+-----------+ Y: Overhead is increased. N: Overhead is _NOT_ increased. It shows that the overhead of those helpers on a tail page don't change between "hugetlb_free_vmemmap=on" and "hugetlb_free_vmemmap=off". But the overhead on a head page will be increased when "hugetlb_free_vmemmap=on" (except PageCompound()). So I believe that Matthew Wilcox's folio series will help with this. The users of PageHead() and PageTail() are much less than compound_head() and most users of PageTail() are VM_BUG_ON(), so I have done some tests about the overhead of compound_head() on head pages. I have tested the overhead of calling compound_head() on a head page, which is 2.11ns (Measure the call time of 10 million times compound_head(), and then average). For a head page whose address is not aligned with PAGE_SIZE or a non-compound page, the overhead of compound_head() is 2.54ns which is increased by 20%. For a head page whose address is aligned with PAGE_SIZE, the overhead of compound_head() is 2.97ns which is increased by 40%. Most pages are the former. I do not think the overhead is significant since the overhead of compound_head() itself is low. This patch (of 5): This patch minimizes the overhead of struct page for 2MB HugeTLB pages significantly. 
It further reduces the overhead of struct page by 12.5% for a 2MB
HugeTLB compared to the previous approach, which means 2GB per 1TB
HugeTLB (2MB type).

After the feature of "Free some vmemmap pages of HugeTLB page" is
enabled, the mapping of the vmemmap addresses associated with a 2MB
HugeTLB page becomes the figure below.

     HugeTLB                  struct pages(8 pages)        page frame(8 pages)
 +-----------+ ---virt_to_page---> +-----------+   mapping to   +-----------+---> PG_head
 |           |                     |     0     | -------------> |     0     |
 |           |                     +-----------+                +-----------+
 |           |                     |     1     | -------------> |     1     |
 |           |                     +-----------+                +-----------+
 |           |                     |     2     | ----------------^ ^ ^ ^ ^ ^
 |           |                     +-----------+                   | | | | |
 |           |                     |     3     | ------------------+ | | | |
 |           |                     +-----------+                     | | | |
 |           |                     |     4     | --------------------+ | | |
 |    2MB    |                     +-----------+                       | | |
 |           |                     |     5     | ----------------------+ | |
 |           |                     +-----------+                         | |
 |           |                     |     6     | ------------------------+ |
 |           |                     +-----------+                           |
 |           |                     |     7     | --------------------------+
 |           |                     +-----------+
 |           |
 |           |
 |           |
 +-----------+

As we can see, the 2nd vmemmap page frame (indexed by 1) is reused and
remapped. However, the 2nd vmemmap page frame can also be freed to the
buddy allocator; then we can change the mapping from the figure above to
the figure below.

     HugeTLB                  struct pages(8 pages)        page frame(8 pages)
 +-----------+ ---virt_to_page---> +-----------+   mapping to   +-----------+---> PG_head
 |           |                     |     0     | -------------> |     0     |
 |           |                     +-----------+                +-----------+
 |           |                     |     1     | ---------------^ ^ ^ ^ ^ ^ ^
 |           |                     +-----------+                  | | | | | |
 |           |                     |     2     | -----------------+ | | | | |
 |           |                     +-----------+                    | | | | |
 |           |                     |     3     | -------------------+ | | | |
 |           |                     +-----------+                      | | | |
 |           |                     |     4     | ---------------------+ | | |
 |    2MB    |                     +-----------+                        | | |
 |           |                     |     5     | -----------------------+ | |
 |           |                     +-----------+                          | |
 |           |                     |     6     | -------------------------+ |
 |           |                     +-----------+                            |
 |           |                     |     7     | ---------------------------+
 |           |                     +-----------+
 |           |
 |           |
 |           |
 +-----------+

After we do this, all tail vmemmap pages (1-7) are mapped to the head
vmemmap page frame (0). In other words, there is more than one page
struct with PG_head associated with each HugeTLB page. We __know__ that
there is only one real head page struct; the tail page structs with
PG_head are fake head page structs. We need an approach to distinguish
between those two different types of page structs so that
compound_head(), PageHead() and PageTail() can work properly when the
parameter is a tail page struct that has PG_head set.

The following code snippet describes how to distinguish between real and
fake head page structs.

	if (test_bit(PG_head, &page->flags)) {
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (head & 1) {
			if (head == (unsigned long)page + 1)
				==> head page struct
			else
				==> tail page struct
		} else
			==> head page struct
	}

We can safely access the fields of @page[1] with PG_head because @page
is a compound page composed of at least two contiguous pages.
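To make the quoted numbers concrete, here is a small hedged
back-of-the-envelope program (assuming 4KB base pages and a 64-byte
struct page, as on x86-64; not part of the patch):

	/* vmemmap_savings.c -- build with: gcc -O2 vmemmap_savings.c */
	#include <stdio.h>

	int main(void)
	{
		const long page_size = 4096;		/* assumed base page size */
		const long struct_page = 64;		/* assumed sizeof(struct page) */
		const long huge = 2L * 1024 * 1024;	/* 2MB HugeTLB page */
		const long structs = huge / page_size;	/* 512 struct pages */
		const long vmemmap = structs * struct_page / page_size; /* 8 pages */

		/* the old scheme kept head + first tail; this series keeps only head */
		printf("vmemmap pages per 2MB HugeTLB: %ld\n", vmemmap);
		printf("extra page freed: 1 of %ld = %.1f%%\n",
		       vmemmap, 100.0 / vmemmap);
		printf("extra savings per 1TB of HugeTLB: %ld MB\n",
		       (1L << 40) / huge * page_size / (1L << 20));
		return 0;
	}

Under those assumptions this prints 12.5% and 2048 MB, matching the
figures quoted above.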
[songmuchun@bytedance.com: restore lost comment changes] Link: https://lkml.kernel.org/r/20211101031651.75851-1-songmuchun@bytedance.com Link: https://lkml.kernel.org/r/20211101031651.75851-2-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Barry Song Cc: Mike Kravetz Cc: Oscar Salvador Cc: Michal Hocko Cc: David Hildenbrand Cc: Chen Huang Cc: Bodeddula Balasubramaniam Cc: Jonathan Corbet Cc: Matthew Wilcox Cc: Xiongchun Duan Cc: Fam Zheng Cc: Qi Zheng Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/admin-guide/kernel-parameters.txt | 2 +- include/linux/page-flags.h | 78 +++++++++++++++++++++++-- mm/hugetlb_vmemmap.c | 62 +++++++++++--------- mm/sparse-vmemmap.c | 21 +++++++ 4 files changed, 130 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 7123524a86b8..bc39497f5788 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1625,7 +1625,7 @@ [KNL] Reguires CONFIG_HUGETLB_PAGE_FREE_VMEMMAP enabled. Allows heavy hugetlb users to free up some more - memory (6 * PAGE_SIZE for each 2MB hugetlb page). + memory (7 * PAGE_SIZE for each 2MB hugetlb page). Format: { on | off (default) } on: enable the feature diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 1c3b6e5c8bfd..111e453f23d2 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -190,13 +190,69 @@ enum pageflags { #ifndef __GENERATING_BOUNDS_H +#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP +extern bool hugetlb_free_vmemmap_enabled; + +/* + * If the feature of freeing some vmemmap pages associated with each HugeTLB + * page is enabled, the head vmemmap page frame is reused and all of the tail + * vmemmap addresses map to the head vmemmap page frame (furture details can + * refer to the figure at the head of the mm/hugetlb_vmemmap.c). In other + * words, there are more than one page struct with PG_head associated with each + * HugeTLB page. We __know__ that there is only one head page struct, the tail + * page structs with PG_head are fake head page structs. We need an approach + * to distinguish between those two different types of page structs so that + * compound_head() can return the real head page struct when the parameter is + * the tail page struct but with PG_head. + * + * The page_fixed_fake_head() returns the real head page struct if the @page is + * fake page head, otherwise, returns @page which can either be a true page + * head or tail. + */ +static __always_inline const struct page *page_fixed_fake_head(const struct page *page) +{ + if (!hugetlb_free_vmemmap_enabled) + return page; + + /* + * Only addresses aligned with PAGE_SIZE of struct page may be fake head + * struct page. The alignment check aims to avoid access the fields ( + * e.g. compound_head) of the @page[1]. It can avoid touch a (possibly) + * cold cacheline in some cases. + */ + if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) && + test_bit(PG_head, &page->flags)) { + /* + * We can safely access the field of the @page[1] with PG_head + * because the @page is a compound page composed with at least + * two contiguous pages. 
+ */ + unsigned long head = READ_ONCE(page[1].compound_head); + + if (likely(head & 1)) + return (const struct page *)(head - 1); + } + return page; +} +#else +static inline const struct page *page_fixed_fake_head(const struct page *page) +{ + return page; +} +#endif + +static __always_inline int page_is_fake_head(struct page *page) +{ + return page_fixed_fake_head(page) != page; +} + static inline unsigned long _compound_head(const struct page *page) { unsigned long head = READ_ONCE(page->compound_head); if (unlikely(head & 1)) return head - 1; - return (unsigned long)page; + return (unsigned long)page_fixed_fake_head(page); } #define compound_head(page) ((typeof(page))_compound_head(page)) @@ -231,12 +287,13 @@ static inline unsigned long _compound_head(const struct page *page) static __always_inline int PageTail(struct page *page) { - return READ_ONCE(page->compound_head) & 1; + return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page); } static __always_inline int PageCompound(struct page *page) { - return test_bit(PG_head, &page->flags) || PageTail(page); + return test_bit(PG_head, &page->flags) || + READ_ONCE(page->compound_head) & 1; } #define PAGE_POISON_PATTERN -1l @@ -695,7 +752,20 @@ static inline bool test_set_page_writeback(struct page *page) return set_page_writeback(page); } -__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) +static __always_inline bool folio_test_head(struct folio *folio) +{ + return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY)); +} + +static __always_inline int PageHead(struct page *page) +{ + PF_POISONED_CHECK(page); + return test_bit(PG_head, &page->flags) && !page_is_fake_head(page); +} + +__SETPAGEFLAG(Head, head, PF_ANY) +__CLEARPAGEFLAG(Head, head, PF_ANY) +CLEARPAGEFLAG(Head, head, PF_ANY) /** * folio_test_large() - Does this folio contain more than one page? diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index c540c21e26f5..4977f5a520c2 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -124,9 +124,9 @@ * page of page structs (page 0) associated with the HugeTLB page contains the 4 * page structs necessary to describe the HugeTLB. The only use of the remaining * pages of page structs (page 1 to page 7) is to point to page->compound_head. - * Therefore, we can remap pages 2 to 7 to page 1. Only 2 pages of page structs + * Therefore, we can remap pages 1 to 7 to page 0. Only 1 page of page structs * will be used for each HugeTLB page. This will allow us to free the remaining - * 6 pages to the buddy allocator. + * 7 pages to the buddy allocator. * * Here is how things look after remapping. 
* @@ -134,30 +134,30 @@ * +-----------+ ---virt_to_page---> +-----------+ mapping to +-----------+ * | | | 0 | -------------> | 0 | * | | +-----------+ +-----------+ - * | | | 1 | -------------> | 1 | - * | | +-----------+ +-----------+ - * | | | 2 | ----------------^ ^ ^ ^ ^ ^ - * | | +-----------+ | | | | | - * | | | 3 | ------------------+ | | | | - * | | +-----------+ | | | | - * | | | 4 | --------------------+ | | | - * | PMD | +-----------+ | | | - * | level | | 5 | ----------------------+ | | - * | mapping | +-----------+ | | - * | | | 6 | ------------------------+ | - * | | +-----------+ | - * | | | 7 | --------------------------+ + * | | | 1 | ---------------^ ^ ^ ^ ^ ^ ^ + * | | +-----------+ | | | | | | + * | | | 2 | -----------------+ | | | | | + * | | +-----------+ | | | | | + * | | | 3 | -------------------+ | | | | + * | | +-----------+ | | | | + * | | | 4 | ---------------------+ | | | + * | PMD | +-----------+ | | | + * | level | | 5 | -----------------------+ | | + * | mapping | +-----------+ | | + * | | | 6 | -------------------------+ | + * | | +-----------+ | + * | | | 7 | ---------------------------+ * | | +-----------+ * | | * | | * | | * +-----------+ * - * When a HugeTLB is freed to the buddy system, we should allocate 6 pages for + * When a HugeTLB is freed to the buddy system, we should allocate 7 pages for * vmemmap pages and restore the previous mapping relationship. * * For the HugeTLB page of the pud level mapping. It is similar to the former. - * We also can use this approach to free (PAGE_SIZE - 2) vmemmap pages. + * We also can use this approach to free (PAGE_SIZE - 1) vmemmap pages. * * Apart from the HugeTLB page of the pmd/pud level mapping, some architectures * (e.g. aarch64) provides a contiguous bit in the translation table entries @@ -166,7 +166,13 @@ * * The contiguous bit is used to increase the mapping size at the pmd and pte * (last) level. So this type of HugeTLB page can be optimized only when its - * size of the struct page structs is greater than 2 pages. + * size of the struct page structs is greater than 1 page. + * + * Notice: The head vmemmap page is not freed to the buddy allocator and all + * tail vmemmap pages are mapped to the head vmemmap page frame. So we can see + * more than one struct page struct with PG_head (e.g. 8 per 2 MB HugeTLB page) + * associated with each HugeTLB page. The compound_head() can handle this + * correctly (more details refer to the comment above compound_head()). */ #define pr_fmt(fmt) "HugeTLB: " fmt @@ -175,19 +181,21 @@ /* * There are a lot of struct page structures associated with each HugeTLB page. * For tail pages, the value of compound_head is the same. So we can reuse first - * page of tail page structures. We map the virtual addresses of the remaining - * pages of tail page structures to the first tail page struct, and then free - * these page frames. Therefore, we need to reserve two pages as vmemmap areas. + * page of head page structures. We map the virtual addresses of all the pages + * of tail page structures to the head page struct, and then free these page + * frames. Therefore, we need to reserve one pages as vmemmap areas. 
 */
-#define RESERVE_VMEMMAP_NR		2U
+#define RESERVE_VMEMMAP_NR		1U
 #define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 
-bool hugetlb_free_vmemmap_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON);
+bool hugetlb_free_vmemmap_enabled __read_mostly =
+	IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON);
+EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled);
 
 static int __init early_hugetlb_free_vmemmap_param(char *buf)
 {
 	/* We cannot optimize if a "struct page" crosses page boundaries. */
-	if ((!is_power_of_2(sizeof(struct page)))) {
+	if (!is_power_of_2(sizeof(struct page))) {
 		pr_warn("cannot free vmemmap pages because \"struct page\" crosses page boundaries\n");
 		return 0;
 	}
@@ -236,7 +244,6 @@ int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 	 */
 	ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
 				  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
-
 	if (!ret)
 		ClearHPageVmemmapOptimized(head);
@@ -282,9 +289,8 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
 	vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
 
 	/*
-	 * The head page and the first tail page are not to be freed to buddy
-	 * allocator, the other pages will map to the first tail page, so they
-	 * can be freed.
+	 * The head page is not to be freed to buddy allocator, the other tail
+	 * pages will map to the head page, so they can be freed.
 	 *
 	 * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
 	 * on some architectures (e.g. aarch64). See Documentation/arm64/
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index db6df27c852a..e881f5db7091 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -245,6 +245,26 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
 	set_pte_at(&init_mm, addr, pte, entry);
 }
 
+/*
+ * How many struct page structs need to be reset. When we reuse the head
+ * struct page, the special metadata (e.g. page->flags or page->mapping)
+ * cannot copy to the tail struct page structs. The invalid value will be
+ * checked in the free_tail_pages_check(). In order to avoid the message
+ * of "corrupted mapping in tail page". We need to reset at least 3 (one
+ * head struct page struct and two tail struct page structs) struct page
+ * structs.
+ */
+#define NR_RESET_STRUCT_PAGE		3
+
+static inline void reset_struct_pages(struct page *start)
+{
+	int i;
+	struct page *from = start + NR_RESET_STRUCT_PAGE;
+
+	for (i = 0; i < NR_RESET_STRUCT_PAGE; i++)
+		memcpy(start + i, from, sizeof(*from));
+}
+
 static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
 				struct vmemmap_remap_walk *walk)
 {
@@ -258,6 +278,7 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
 	list_del(&page->lru);
 	to = page_to_virt(page);
 	copy_page(to, (void *)walk->reuse_addr);
+	reset_struct_pages(to);
 
 	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
 }
--
cgit v1.2.3-71-gd317


From a6b40850c442bf996e729e1d441d3dbc37cea171 Mon Sep 17 00:00:00 2001
From: Muchun Song
Date: Tue, 22 Mar 2022 14:45:03 -0700
Subject: mm: hugetlb: replace hugetlb_free_vmemmap_enabled with a static_key

page_fixed_fake_head() is used throughout memory management, and its
conditional check requires reading a global variable. Although the
overhead of this check may be small, it increases when the memory cache
comes under pressure. Also, the global variable will not be modified
after system boot, so it is a very good fit for the static key
mechanism.
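For readers unfamiliar with the mechanism this commit switches to, here
is a minimal hedged sketch of the static-key pattern in kernel-module
style (demo_key and the module are illustrative; the jump-label API is
real):

	#include <linux/module.h>
	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(demo_key);	/* defaults to false */

	static int __init demo_init(void)
	{
		/*
		 * static_branch_unlikely() compiles to a branch that is
		 * patched in place when the key flips, so the fast path
		 * pays no memory load for a flag that never changes
		 * after boot.
		 */
		if (static_branch_unlikely(&demo_key))
			pr_info("slow path\n");

		static_branch_enable(&demo_key);	/* patches all branch sites */
		return 0;
	}
	module_init(demo_init);
	MODULE_LICENSE("GPL");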
Link: https://lkml.kernel.org/r/20211101031651.75851-3-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Barry Song Cc: Bodeddula Balasubramaniam Cc: Chen Huang Cc: David Hildenbrand Cc: Fam Zheng Cc: Jonathan Corbet Cc: Matthew Wilcox Cc: Michal Hocko Cc: Mike Kravetz Cc: Oscar Salvador Cc: Qi Zheng Cc: Xiongchun Duan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 6 ------ include/linux/page-flags.h | 16 ++++++++++++++-- mm/hugetlb_vmemmap.c | 12 ++++++------ mm/memory_hotplug.c | 2 +- 4 files changed, 21 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 52c462390aee..08357b4c7be7 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -1075,12 +1075,6 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr } #endif /* CONFIG_HUGETLB_PAGE */ -#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP -extern bool hugetlb_free_vmemmap_enabled; -#else -#define hugetlb_free_vmemmap_enabled false -#endif - static inline spinlock_t *huge_pte_lock(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 111e453f23d2..340cb8156568 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -191,7 +191,14 @@ enum pageflags { #ifndef __GENERATING_BOUNDS_H #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP -extern bool hugetlb_free_vmemmap_enabled; +DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, + hugetlb_free_vmemmap_enabled_key); + +static __always_inline bool hugetlb_free_vmemmap_enabled(void) +{ + return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, + &hugetlb_free_vmemmap_enabled_key); +} /* * If the feature of freeing some vmemmap pages associated with each HugeTLB @@ -211,7 +218,7 @@ extern bool hugetlb_free_vmemmap_enabled; */ static __always_inline const struct page *page_fixed_fake_head(const struct page *page) { - if (!hugetlb_free_vmemmap_enabled) + if (!hugetlb_free_vmemmap_enabled()) return page; /* @@ -239,6 +246,11 @@ static inline const struct page *page_fixed_fake_head(const struct page *page) { return page; } + +static inline bool hugetlb_free_vmemmap_enabled(void) +{ + return false; +} #endif static __always_inline int page_is_fake_head(struct page *page) diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 4977f5a520c2..791626983c2e 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -188,9 +188,9 @@ #define RESERVE_VMEMMAP_NR 1U #define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT) -bool hugetlb_free_vmemmap_enabled __read_mostly = - IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON); -EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled); +DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, + hugetlb_free_vmemmap_enabled_key); +EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key); static int __init early_hugetlb_free_vmemmap_param(char *buf) { @@ -204,9 +204,9 @@ static int __init early_hugetlb_free_vmemmap_param(char *buf) return -EINVAL; if (!strcmp(buf, "on")) - hugetlb_free_vmemmap_enabled = true; + static_branch_enable(&hugetlb_free_vmemmap_enabled_key); else if (!strcmp(buf, "off")) - hugetlb_free_vmemmap_enabled = false; + static_branch_disable(&hugetlb_free_vmemmap_enabled_key); else return -EINVAL; @@ -284,7 +284,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h) BUILD_BUG_ON(__NR_USED_SUBPAGE >= RESERVE_VMEMMAP_SIZE / sizeof(struct page)); - if 
(!hugetlb_free_vmemmap_enabled)
+	if (!hugetlb_free_vmemmap_enabled())
 		return;
 
 	vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2a9627dc784c..0139b77c51d5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1327,7 +1327,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size)
 	 * populate a single PMD.
 	 */
 	return memmap_on_memory &&
-	       !hugetlb_free_vmemmap_enabled &&
+	       !hugetlb_free_vmemmap_enabled() &&
 	       IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
 	       size == memory_block_size_bytes() &&
 	       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
--
cgit v1.2.3-71-gd317


From e54084173487804f5e2f23facf107fd9336e637e Mon Sep 17 00:00:00 2001
From: Muchun Song
Date: Tue, 22 Mar 2022 14:45:12 -0700
Subject: mm: sparsemem: move vmemmap related to HugeTLB to CONFIG_HUGETLB_PAGE_FREE_VMEMMAP

The vmemmap_remap_free/alloc functions are relevant to HugeTLB, so move
them under the scope of CONFIG_HUGETLB_PAGE_FREE_VMEMMAP.

Link: https://lkml.kernel.org/r/20211101031651.75851-6-songmuchun@bytedance.com
Signed-off-by: Muchun Song
Reviewed-by: Barry Song
Cc: Bodeddula Balasubramaniam
Cc: Chen Huang
Cc: David Hildenbrand
Cc: Fam Zheng
Cc: Jonathan Corbet
Cc: Matthew Wilcox
Cc: Michal Hocko
Cc: Mike Kravetz
Cc: Oscar Salvador
Cc: Qi Zheng
Cc: Xiongchun Duan
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mm.h  | 2 ++
 mm/sparse-vmemmap.c | 2 ++
 2 files changed, 4 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 45a449e8c209..9d58321386cc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3146,10 +3146,12 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
 }
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 int vmemmap_remap_free(unsigned long start, unsigned long end,
 		       unsigned long reuse);
 int vmemmap_remap_alloc(unsigned long start, unsigned long end,
 			unsigned long reuse, gfp_t gfp_mask);
+#endif
 
 void *sparse_buffer_alloc(unsigned long size);
 struct page * __populate_section_memmap(unsigned long pfn,
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index c64d1aa3c4b5..8aecd6b3896c 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -34,6 +34,7 @@
 #include 
 #include 
 
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 /**
  * struct vmemmap_remap_walk - walk vmemmap page table
  *
@@ -419,6 +420,7 @@ int vmemmap_remap_alloc(unsigned long start, unsigned long end,
 
 	return 0;
 }
+#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
 
 /*
  * Allocate a block of memory to be used to back the virtual memory map
--
cgit v1.2.3-71-gd317


From 824ddc601adc2cc48efb7f58b57997986c1c1276 Mon Sep 17 00:00:00 2001
From: Nadav Amit
Date: Tue, 22 Mar 2022 14:45:32 -0700
Subject: userfaultfd: provide unmasked address on page-fault

Userfaultfd is supposed to provide the full address (i.e., unmasked) of
the faulting access back to userspace. However, that has not been the
case for quite some time. Even running "userfaultfd_demo" from the
userfaultfd man page provides the wrong output (and contradicts the man
page). Notice that "UFFD_EVENT_PAGEFAULT event" shows the masked address
(7fc5e30b3000) and not the first read address (0x7fc5e30b300f).
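As a hedged userspace sketch (not from the patch), this is how a monitor
would opt in to the feature bit added below; it requires uapi headers
that already define UFFD_FEATURE_EXACT_ADDRESS, and error handling is
trimmed:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/userfaultfd.h>

	int uffd_open_exact(void)
	{
		int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
		struct uffdio_api api = {
			.api = UFFD_API,
			.features = UFFD_FEATURE_EXACT_ADDRESS,
		};

		if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0)
			return -1;	/* kernel or headers too old */
		/*
		 * msg.arg.pagefault.address is now unmasked; when page
		 * granularity is still needed, mask in userspace with
		 * addr & ~(page_size - 1).
		 */
		return uffd;
	}

For reference, the man-page demo output showing the masked address is: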
	Address returned by mmap() = 0x7fc5e30b3000

	fault_handler_thread():
	    poll() returns: nready = 1; POLLIN = 1; POLLERR = 0
	    UFFD_EVENT_PAGEFAULT event: flags = 0; address = 7fc5e30b3000
		(uffdio_copy.copy returned 4096)
	Read address 0x7fc5e30b300f in main(): A
	Read address 0x7fc5e30b340f in main(): A
	Read address 0x7fc5e30b380f in main(): A
	Read address 0x7fc5e30b3c0f in main(): A

The exact address is useful for various reasons and specifically for
prefetching decisions. If it is known that the memory is populated by
certain objects whose size is not page-aligned, then based on the
faulting address, the uffd-monitor can decide whether to prefetch and
prefault the adjacent page.

This bug has been in the kernel for quite some time: since commit
1a29d85eb0f1 ("mm: use vmf->address instead of of vmf->virtual_address"),
which dates back to 2016.

A concern has been raised that existing userspace applications might
rely on the old/wrong behavior in which the address is masked.
Therefore, it was suggested to provide the masked address unless the
user explicitly asks for the exact address.

Add a new userfaultfd feature UFFD_FEATURE_EXACT_ADDRESS to direct
userfaultfd to provide the exact address. Add a new "real_address" field
to vmf to hold the unmasked address. Provide the address to userspace
accordingly.

Initialize real_address in various code-paths to be consistent with
address, even when it is not used, to be on the safe side.

[namit@vmware.com: initialize real_address on all code paths, per Jan]
Link: https://lkml.kernel.org/r/20220226022655.350562-1-namit@vmware.com
[akpm@linux-foundation.org: fix typo in comment, per Jan]
Link: https://lkml.kernel.org/r/20220218041003.3508-1-namit@vmware.com
Signed-off-by: Nadav Amit
Acked-by: Peter Xu
Reviewed-by: David Hildenbrand
Acked-by: Mike Rapoport
Reviewed-by: Jan Kara
Cc: Andrea Arcangeli
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/userfaultfd.c                 | 5 ++++-
 include/linux/mm.h               | 3 ++-
 include/uapi/linux/userfaultfd.h | 8 +++++++-
 mm/hugetlb.c                     | 6 ++++--
 mm/memory.c                      | 1 +
 mm/swapfile.c                    | 1 +
 6 files changed, 19 insertions(+), 5 deletions(-)

(limited to 'include/linux')

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 8e03b3d3f5fa..aa0c47cb0d16 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -198,6 +198,9 @@ static inline struct uffd_msg userfault_msg(unsigned long address,
 	struct uffd_msg msg;
 	msg_init(&msg);
 	msg.event = UFFD_EVENT_PAGEFAULT;
+
+	if (!(features & UFFD_FEATURE_EXACT_ADDRESS))
+		address &= PAGE_MASK;
 	msg.arg.pagefault.address = address;
 	/*
 	 * These flags indicate why the userfault occurred:
@@ -482,7 +485,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
 	uwq.wq.private = current;
-	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
+	uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason,
 				ctx->features);
 	uwq.ctx = ctx;
 	uwq.waken = false;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9d58321386cc..0e4fd101616e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -478,7 +478,8 @@ struct vm_fault {
 		struct vm_area_struct *vma;	/* Target VMA */
 		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
 		pgoff_t pgoff;			/* Logical page offset based on vma */
-		unsigned long address;		/* Faulting virtual address */
+		unsigned long address;		/* Faulting virtual address - masked */
+		unsigned long real_address;	/* Faulting virtual address - unmasked */
 	};
 	enum fault_flag
flags; /* FAULT_FLAG_xxx flags * XXX: should really be 'const' */ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 05b31d60acf6..ef739054cb1c 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -32,7 +32,8 @@ UFFD_FEATURE_SIGBUS | \ UFFD_FEATURE_THREAD_ID | \ UFFD_FEATURE_MINOR_HUGETLBFS | \ - UFFD_FEATURE_MINOR_SHMEM) + UFFD_FEATURE_MINOR_SHMEM | \ + UFFD_FEATURE_EXACT_ADDRESS) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -189,6 +190,10 @@ struct uffdio_api { * * UFFD_FEATURE_MINOR_SHMEM indicates the same support as * UFFD_FEATURE_MINOR_HUGETLBFS, but for shmem-backed pages instead. + * + * UFFD_FEATURE_EXACT_ADDRESS indicates that the exact address of page + * faults would be provided and the offset within the page would not be + * masked. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -201,6 +206,7 @@ struct uffdio_api { #define UFFD_FEATURE_THREAD_ID (1<<8) #define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) #define UFFD_FEATURE_MINOR_SHMEM (1<<10) +#define UFFD_FEATURE_EXACT_ADDRESS (1<<11) __u64 features; __u64 ioctls; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f425147e4ce3..75b41879e9e9 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5341,6 +5341,7 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, pgoff_t idx, unsigned int flags, unsigned long haddr, + unsigned long addr, unsigned long reason) { vm_fault_t ret; @@ -5348,6 +5349,7 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, struct vm_fault vmf = { .vma = vma, .address = haddr, + .real_address = addr, .flags = flags, /* @@ -5416,7 +5418,7 @@ retry: /* Check for page in userfault range */ if (userfaultfd_missing(vma)) { ret = hugetlb_handle_userfault(vma, mapping, idx, - flags, haddr, + flags, haddr, address, VM_UFFD_MISSING); goto out; } @@ -5480,7 +5482,7 @@ retry: unlock_page(page); put_page(page); ret = hugetlb_handle_userfault(vma, mapping, idx, - flags, haddr, + flags, haddr, address, VM_UFFD_MINOR); goto out; } diff --git a/mm/memory.c b/mm/memory.c index 1a55b4c5b5db..e0f3410fa70c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4633,6 +4633,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, struct vm_fault vmf = { .vma = vma, .address = address & PAGE_MASK, + .real_address = address, .flags = flags, .pgoff = linear_page_index(vma, address), .gfp_mask = __get_fault_gfp_mask(vma), diff --git a/mm/swapfile.c b/mm/swapfile.c index bf0df7aa7158..33c7abb16610 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1951,6 +1951,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, struct vm_fault vmf = { .vma = vma, .address = addr, + .real_address = addr, .pmd = pmd, }; -- cgit v1.2.3-71-gd317 From b698f0a1773f7df73f2bb4bfe0e597ea1bb3881f Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 22 Mar 2022 14:45:38 -0700 Subject: mm/fs: delete PF_SWAPWRITE PF_SWAPWRITE has been redundant since v3.2 commit ee72886d8ed5 ("mm: vmscan: do not writeback filesystem pages in direct reclaim"). Coincidentally, NeilBrown's current patch "remove inode_congested()" deletes may_write_to_inode(), which appeared to be the one function which took notice of PF_SWAPWRITE. But if you study the old logic, and the conditions under which may_write_to_inode() was called, you discover that flag and function have been pointless for a decade. 
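The __node_reclaim() hunk below keeps the memalloc_noreclaim_save()/
restore() pairing while dropping PF_SWAPWRITE. As a hedged aside, the
surviving task-flag idiom looks like this (demo_reclaim_section() is
illustrative; the two helpers are the real API):

	#include <linux/sched/mm.h>

	static void demo_reclaim_section(void)
	{
		unsigned int noreclaim_flag;

		/*
		 * Sets PF_MEMALLOC and returns the previous bits, so
		 * nested sections restore exactly what they found.
		 */
		noreclaim_flag = memalloc_noreclaim_save();

		/* ... work that must not recurse into reclaim ... */

		memalloc_noreclaim_restore(noreclaim_flag);
	}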
Link: https://lkml.kernel.org/r/75e80e7-742d-e3bd-531-614db8961e4@google.com Signed-off-by: Hugh Dickins Cc: NeilBrown Cc: Jan Kara Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 3 --- fs/xfs/libxfs/xfs_btree.c | 2 +- include/linux/sched.h | 1 - mm/migrate.c | 7 ------- mm/vmscan.c | 8 ++------ 5 files changed, 3 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 42a3dfad40b8..3d8dacfff150 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -2197,7 +2197,6 @@ void wb_workfn(struct work_struct *work) long pages_written; set_worker_desc("flush-%s", bdi_dev_name(wb->bdi)); - current->flags |= PF_SWAPWRITE; if (likely(!current_is_workqueue_rescuer() || !test_bit(WB_registered, &wb->state))) { @@ -2226,8 +2225,6 @@ void wb_workfn(struct work_struct *work) wb_wakeup(wb); else if (wb_has_dirty_io(wb) && dirty_writeback_interval) wb_wakeup_delayed(wb); - - current->flags &= ~PF_SWAPWRITE; } /* diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index f18a875f51c6..c1500b238520 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -2818,7 +2818,7 @@ xfs_btree_split_worker( * in any way. */ if (args->kswapd) - new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; + new_pflags |= PF_MEMALLOC | PF_KSWAPD; current_set_flags_nested(&pflags, new_pflags); xfs_trans_set_context(args->cur->bc_tp); diff --git a/include/linux/sched.h b/include/linux/sched.h index 75ba8aa60248..c3c841a02a00 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1689,7 +1689,6 @@ extern struct pid *cad_pid; * I am cleaning dirty pages from some other bdi. */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ -#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */ diff --git a/mm/migrate.c b/mm/migrate.c index 4359d49b508a..fc75c409482e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1350,7 +1350,6 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, bool is_thp = false; struct page *page; struct page *page2; - int swapwrite = current->flags & PF_SWAPWRITE; int rc, nr_subpages; LIST_HEAD(ret_pages); LIST_HEAD(thp_split_pages); @@ -1359,9 +1358,6 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, trace_mm_migrate_pages_start(mode, reason); - if (!swapwrite) - current->flags |= PF_SWAPWRITE; - thp_subpage_migration: for (pass = 0; pass < 10 && (retry || thp_retry); pass++) { retry = 0; @@ -1516,9 +1512,6 @@ out: trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded, nr_thp_failed, nr_thp_split, mode, reason); - if (!swapwrite) - current->flags &= ~PF_SWAPWRITE; - if (ret_succeeded) *ret_succeeded = nr_succeeded; diff --git a/mm/vmscan.c b/mm/vmscan.c index 5e1469887afa..d4168966311f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4457,7 +4457,7 @@ static int kswapd(void *p) * us from recursively trying to free more memory as we're * trying to free the first piece of memory in the first place). 
*/ - tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; + tsk->flags |= PF_MEMALLOC | PF_KSWAPD; set_freezable(); WRITE_ONCE(pgdat->kswapd_order, 0); @@ -4508,7 +4508,7 @@ kswapd_try_sleep: goto kswapd_try_sleep; } - tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); + tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); return 0; } @@ -4749,11 +4749,8 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in fs_reclaim_acquire(sc.gfp_mask); /* * We need to be able to allocate from the reserves for RECLAIM_UNMAP - * and we also need to be able to write out pages for RECLAIM_WRITE - * and RECLAIM_UNMAP. */ noreclaim_flag = memalloc_noreclaim_save(); - p->flags |= PF_SWAPWRITE; set_task_reclaim_state(p, &sc.reclaim_state); if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) { @@ -4767,7 +4764,6 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in } set_task_reclaim_state(p, NULL); - current->flags &= ~PF_SWAPWRITE; memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(sc.gfp_mask); psi_memstall_leave(&pflags); -- cgit v1.2.3-71-gd317 From 89f6c88a6ab4a11deb14c270f7f1454cda4f73d6 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 22 Mar 2022 14:45:41 -0700 Subject: mm: __isolate_lru_page_prepare() in isolate_migratepages_block() __isolate_lru_page_prepare() conflates two unrelated functions, with the flags to one disjoint from the flags to the other; and hides some of the important checks outside of isolate_migratepages_block(), where the sequence is better to be visible. It comes from the days of lumpy reclaim, before compaction, when the combination made more sense. Move what's needed by mm/compaction.c isolate_migratepages_block() inline there, and what's needed by mm/vmscan.c isolate_lru_pages() inline there. Shorten "isolate_mode" to "mode", so the sequence of conditions is easier to read. Declare a "mapping" variable, to save one call to page_mapping() (but not another: calling again after page is locked is necessary). Simplify isolate_lru_pages() with a "move_to" list pointer. Link: https://lkml.kernel.org/r/879d62a8-91cc-d3c6-fb3b-69768236df68@google.com Signed-off-by: Hugh Dickins Acked-by: David Rientjes Reviewed-by: Alex Shi Cc: Alexander Duyck Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 1 - mm/compaction.c | 51 ++++++++++++++++++++++---- mm/vmscan.c | 101 +++++++++------------------------------------------ 3 files changed, 62 insertions(+), 91 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 3db431276d82..a246c137678e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -387,7 +387,6 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page, extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); -extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, diff --git a/mm/compaction.c b/mm/compaction.c index b4e94cda3019..b72fb9da4c45 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -785,7 +785,7 @@ static bool too_many_isolated(pg_data_t *pgdat) * @cc: Compaction control structure. 
* @low_pfn: The first PFN to isolate * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock - * @isolate_mode: Isolation mode to be used. + * @mode: Isolation mode to be used. * * Isolate all pages that can be migrated from the range specified by * [low_pfn, end_pfn). The range is expected to be within same pageblock. @@ -798,7 +798,7 @@ static bool too_many_isolated(pg_data_t *pgdat) */ static int isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, - unsigned long end_pfn, isolate_mode_t isolate_mode) + unsigned long end_pfn, isolate_mode_t mode) { pg_data_t *pgdat = cc->zone->zone_pgdat; unsigned long nr_scanned = 0, nr_isolated = 0; @@ -806,6 +806,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, unsigned long flags = 0; struct lruvec *locked = NULL; struct page *page = NULL, *valid_page = NULL; + struct address_space *mapping; unsigned long start_pfn = low_pfn; bool skip_on_failure = false; unsigned long next_skip_pfn = 0; @@ -990,7 +991,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, locked = NULL; } - if (!isolate_movable_page(page, isolate_mode)) + if (!isolate_movable_page(page, mode)) goto isolate_success; } @@ -1002,15 +1003,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * so avoid taking lru_lock and isolating it unnecessarily in an * admittedly racy check. */ - if (!page_mapping(page) && - page_count(page) > page_mapcount(page)) + mapping = page_mapping(page); + if (!mapping && page_count(page) > page_mapcount(page)) goto isolate_fail; /* * Only allow to migrate anonymous pages in GFP_NOFS context * because those do not depend on fs locks. */ - if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) + if (!(cc->gfp_mask & __GFP_FS) && mapping) goto isolate_fail; /* @@ -1021,9 +1022,45 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if (unlikely(!get_page_unless_zero(page))) goto isolate_fail; - if (!__isolate_lru_page_prepare(page, isolate_mode)) + /* Only take pages on LRU: a check now makes later tests safe */ + if (!PageLRU(page)) + goto isolate_fail_put; + + /* Compaction might skip unevictable pages but CMA takes them */ + if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page)) + goto isolate_fail_put; + + /* + * To minimise LRU disruption, the caller can indicate with + * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages + * it will be able to migrate without blocking - clean pages + * for the most part. PageWriteback would require blocking. + */ + if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page)) goto isolate_fail_put; + if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) { + bool migrate_dirty; + + /* + * Only pages without mappings or that have a + * ->migratepage callback are possible to migrate + * without blocking. However, we can be racing with + * truncation so it's necessary to lock the page + * to stabilise the mapping as truncation holds + * the page lock until after the page is removed + * from the page cache. 
+ */ + if (!trylock_page(page)) + goto isolate_fail_put; + + mapping = page_mapping(page); + migrate_dirty = !mapping || mapping->a_ops->migratepage; + unlock_page(page); + if (!migrate_dirty) + goto isolate_fail_put; + } + /* Try isolate the page */ if (!TestClearPageLRU(page)) goto isolate_fail_put; diff --git a/mm/vmscan.c b/mm/vmscan.c index d4168966311f..24601f394c25 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1998,69 +1998,6 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone, return nr_reclaimed; } -/* - * Attempt to remove the specified page from its LRU. Only take this page - * if it is of the appropriate PageActive status. Pages which are being - * freed elsewhere are also ignored. - * - * page: page to consider - * mode: one of the LRU isolation modes defined above - * - * returns true on success, false on failure. - */ -bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode) -{ - /* Only take pages on the LRU. */ - if (!PageLRU(page)) - return false; - - /* Compaction should not handle unevictable pages but CMA can do so */ - if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) - return false; - - /* - * To minimise LRU disruption, the caller can indicate that it only - * wants to isolate pages it will be able to operate on without - * blocking - clean pages for the most part. - * - * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages - * that it is possible to migrate without blocking - */ - if (mode & ISOLATE_ASYNC_MIGRATE) { - /* All the caller can do on PageWriteback is block */ - if (PageWriteback(page)) - return false; - - if (PageDirty(page)) { - struct address_space *mapping; - bool migrate_dirty; - - /* - * Only pages without mappings or that have a - * ->migratepage callback are possible to migrate - * without blocking. However, we can be racing with - * truncation so it's necessary to lock the page - * to stabilise the mapping as truncation holds - * the page lock until after the page is removed - * from the page cache. - */ - if (!trylock_page(page)) - return false; - - mapping = page_mapping(page); - migrate_dirty = !mapping || mapping->a_ops->migratepage; - unlock_page(page); - if (!migrate_dirty) - return false; - } - } - - if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) - return false; - - return true; -} - /* * Update LRU sizes after isolating pages. The LRU size updates must * be complete before mem_cgroup_update_lru_size due to a sanity check. @@ -2112,11 +2049,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, unsigned long skipped = 0; unsigned long scan, total_scan, nr_pages; LIST_HEAD(pages_skipped); - isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED); total_scan = 0; scan = 0; while (scan < nr_to_scan && !list_empty(src)) { + struct list_head *move_to = src; struct page *page; page = lru_to_page(src); @@ -2126,9 +2063,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, total_scan += nr_pages; if (page_zonenum(page) > sc->reclaim_idx) { - list_move(&page->lru, &pages_skipped); nr_skipped[page_zonenum(page)] += nr_pages; - continue; + move_to = &pages_skipped; + goto move; } /* @@ -2136,37 +2073,34 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, * return with no isolated pages if the LRU mostly contains * ineligible pages. This causes the VM to not reclaim any * pages, triggering a premature OOM. - * - * Account all tail pages of THP. 
This would not cause
-	 * premature OOM since __isolate_lru_page() returns -EBUSY
-	 * only when the page is being freed somewhere else.
+	 * Account all tail pages of THP.
 	 */
 	scan += nr_pages;
-	if (!__isolate_lru_page_prepare(page, mode)) {
-		/* It is being freed elsewhere */
-		list_move(&page->lru, src);
-		continue;
-	}
+
+	if (!PageLRU(page))
+		goto move;
+	if (!sc->may_unmap && page_mapped(page))
+		goto move;
+
 	/*
 	 * Be careful not to clear PageLRU until after we're
 	 * sure the page is not being freed elsewhere -- the
 	 * page release code relies on it.
 	 */
-	if (unlikely(!get_page_unless_zero(page))) {
-		list_move(&page->lru, src);
-		continue;
-	}
+	if (unlikely(!get_page_unless_zero(page)))
+		goto move;
 
 	if (!TestClearPageLRU(page)) {
 		/* Another thread is already isolating this page */
 		put_page(page);
-		list_move(&page->lru, src);
-		continue;
+		goto move;
 	}
 	nr_taken += nr_pages;
 	nr_zone_taken[page_zonenum(page)] += nr_pages;
-	list_move(&page->lru, dst);
+	move_to = dst;
+move:
+	list_move(&page->lru, move_to);
 }
 
 /*
@@ -2190,7 +2124,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	}
 	*nr_scanned = total_scan;
 	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
-				    total_scan, skipped, nr_taken, mode, lru);
+				    total_scan, skipped, nr_taken,
+				    sc->may_unmap ? 0 : ISOLATE_UNMAPPED, lru);
 	update_lru_sizes(lruvec, lru, nr_zone_taken);
 	return nr_taken;
 }
--
cgit v1.2.3-71-gd317


From 356ea3865687926e5da7579d1f3351d3f0a322a1 Mon Sep 17 00:00:00 2001
From: "andrew.yang"
Date: Tue, 22 Mar 2022 14:46:08 -0700
Subject: mm/migrate: fix race between lock page and clear PG_Isolated

When memory is tight, the system may start to compact memory to satisfy
demands for large contiguous memory. If one process tries to lock a
memory page that is being locked and isolated for compaction, it may
wait a long time or even forever. This is because compaction performs a
non-atomic PG_Isolated clear while holding the page lock, which may
overwrite the PG_waiters bit set by a process that failed to obtain the
page lock and added itself to the waiting queue:

	CPU1                            CPU2
	lock_page(page); (successful)
	                                lock_page(); (failed)
	__ClearPageIsolated(page);      SetPageWaiters(page) (may be overwritten)
	unlock_page(page);

The solution is to avoid non-atomic operations on page flags while
holding the page lock.
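The lost update described above is the classic non-atomic
read-modify-write race, and it can be reproduced in userspace. A hedged
sketch, only loosely mirroring the kernel bits (build with
gcc -O2 -pthread):

	#include <pthread.h>
	#include <stdio.h>

	#define PG_ISOLATED 1UL
	#define PG_WAITERS  2UL

	static volatile unsigned long flags;	/* stands in for page->flags */

	static void *clear_isolated_nonatomic(void *arg)
	{
		for (long i = 0; i < 10000000; i++) {
			flags |= PG_ISOLATED;	/* plain RMW, like __SetPageIsolated() */
			flags &= ~PG_ISOLATED;	/* plain RMW, like __ClearPageIsolated() */
		}
		return NULL;
	}

	static void *set_waiters_atomic(void *arg)
	{
		long lost = 0;

		for (long i = 0; i < 10000000; i++) {
			__atomic_fetch_or(&flags, PG_WAITERS, __ATOMIC_RELAXED);
			if (!(__atomic_load_n(&flags, __ATOMIC_RELAXED) & PG_WAITERS))
				lost++;	/* bit vanished under the plain RMW */
			__atomic_fetch_and(&flags, ~PG_WAITERS, __ATOMIC_RELAXED);
		}
		printf("PG_WAITERS lost %ld times\n", lost);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, clear_isolated_nonatomic, NULL);
		pthread_create(&b, NULL, set_waiters_atomic, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}

On a multi-core machine the counter is typically nonzero: the plain RMW
in the first thread reads the word before the atomic OR and writes back
a stale value over it, exactly the way __ClearPageIsolated() could
overwrite PG_waiters.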
Link: https://lkml.kernel.org/r/20220315030515.20263-1-andrew.yang@mediatek.com Signed-off-by: andrew.yang Cc: Matthias Brugger Cc: Matthew Wilcox Cc: "Vlastimil Babka" Cc: David Howells Cc: "William Kucharski" Cc: David Hildenbrand Cc: Yang Shi Cc: Marc Zyngier Cc: Nicholas Tang Cc: Kuan-Ying Lee Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 2 +- mm/migrate.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 340cb8156568..88fe1d759cdd 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -1000,7 +1000,7 @@ PAGE_TYPE_OPS(Guard, guard) extern bool is_free_buddy_page(struct page *page); -__PAGEFLAG(Isolated, isolated, PF_ANY); +PAGEFLAG(Isolated, isolated, PF_ANY); #ifdef CONFIG_MMU #define __PG_MLOCKED (1UL << PG_mlocked) diff --git a/mm/migrate.c b/mm/migrate.c index 67616ee4aa26..c0d16f050fec 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -107,7 +107,7 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode) /* Driver shouldn't use PG_isolated bit of page->flags */ WARN_ON_ONCE(PageIsolated(page)); - __SetPageIsolated(page); + SetPageIsolated(page); unlock_page(page); return 0; @@ -126,7 +126,7 @@ static void putback_movable_page(struct page *page) mapping = page_mapping(page); mapping->a_ops->putback_page(page); - __ClearPageIsolated(page); + ClearPageIsolated(page); } /* @@ -159,7 +159,7 @@ void putback_movable_pages(struct list_head *l) if (PageMovable(page)) putback_movable_page(page); else - __ClearPageIsolated(page); + ClearPageIsolated(page); unlock_page(page); put_page(page); } else { @@ -883,7 +883,7 @@ static int move_to_new_page(struct page *newpage, struct page *page, VM_BUG_ON_PAGE(!PageIsolated(page), page); if (!PageMovable(page)) { rc = MIGRATEPAGE_SUCCESS; - __ClearPageIsolated(page); + ClearPageIsolated(page); goto out; } @@ -905,7 +905,7 @@ static int move_to_new_page(struct page *newpage, struct page *page, * We clear PG_movable under page_lock so any compactor * cannot try to migrate this page. */ - __ClearPageIsolated(page); + ClearPageIsolated(page); } /* @@ -1091,7 +1091,7 @@ static int unmap_and_move(new_page_t get_new_page, if (unlikely(__PageMovable(page))) { lock_page(page); if (!PageMovable(page)) - __ClearPageIsolated(page); + ClearPageIsolated(page); unlock_page(page); } goto out; -- cgit v1.2.3-71-gd317 From 27d121d0ec6d604d0147c5b579e4181b688a2d64 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Tue, 22 Mar 2022 14:46:14 -0700 Subject: mm/cma: provide option to opt out from exposing pages on activation failure Patch series "powerpc/fadump: handle CMA activation failure appropriately", v3. Commit 072355c1cf2d ("mm/cma: expose all pages to the buddy if activation of an area fails") started exposing all pages to buddy allocator on CMA activation failure. But there can be CMA users that want to handle the reserved memory differently on CMA allocation failure. Provide an option to opt out from exposing pages to buddy for such cases. 
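A hedged sketch of how a CMA user such as fadump might consume the hook
added below; the setup function is hypothetical, while
cma_init_reserved_mem() and the new cma_reserve_pages_on_error() are the
real entry points:

	#include <linux/cma.h>

	static struct cma *demo_cma;	/* illustrative placeholder */

	static int __init demo_setup_cma(phys_addr_t base, phys_addr_t size)
	{
		int rc = cma_init_reserved_mem(base, size, 0, "demo_cma",
					       &demo_cma);
		if (rc)
			return rc;
		/*
		 * On activation failure, keep the range reserved instead
		 * of releasing it to the buddy allocator, e.g. because a
		 * dump kernel still needs the memory to stay untouched.
		 */
		cma_reserve_pages_on_error(demo_cma);
		return 0;
	}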
Link: https://lkml.kernel.org/r/20220117075246.36072-1-hbathini@linux.ibm.com
Link: https://lkml.kernel.org/r/20220117075246.36072-2-hbathini@linux.ibm.com
Signed-off-by: Hari Bathini
Reviewed-by: David Hildenbrand
Cc: Oscar Salvador
Cc: Mike Kravetz
Cc: Mahesh Salgaonkar
Cc: Sourabh Jain
Cc: Michael Ellerman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/cma.h | 2 ++
 mm/cma.c            | 11 +++++++++--
 mm/cma.h            | 1 +
 3 files changed, 12 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/cma.h b/include/linux/cma.h
index b1ba94f1cc9c..90fd742fd1ef 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -58,4 +58,6 @@ extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
 
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
+
+extern void cma_reserve_pages_on_error(struct cma *cma);
 #endif
diff --git a/mm/cma.c b/mm/cma.c
index 5a2cd5851658..eaa4b5c920a2 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -131,8 +131,10 @@ not_in_zone:
 	bitmap_free(cma->bitmap);
 out_error:
 	/* Expose all pages to the buddy, they are useless for CMA. */
-	for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
-		free_reserved_page(pfn_to_page(pfn));
+	if (!cma->reserve_pages_on_error) {
+		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
+			free_reserved_page(pfn_to_page(pfn));
+	}
 	totalcma_pages -= cma->count;
 	cma->count = 0;
 	pr_err("CMA area %s could not be activated\n", cma->name);
@@ -150,6 +152,11 @@ static int __init cma_init_reserved_areas(void)
 }
 core_initcall(cma_init_reserved_areas);
 
+void __init cma_reserve_pages_on_error(struct cma *cma)
+{
+	cma->reserve_pages_on_error = true;
+}
+
 /**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
diff --git a/mm/cma.h b/mm/cma.h
index 2c775877eae2..88a0595670b7 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -30,6 +30,7 @@ struct cma {
 	/* kobject requires dynamic object */
 	struct cma_kobject *cma_kobj;
 #endif
+	bool reserve_pages_on_error;
 };
 
 extern struct cma cma_areas[MAX_CMA_AREAS];
--
cgit v1.2.3-71-gd317


From e39bb6be9f2b39a6dbaeff484361de76021b175d Mon Sep 17 00:00:00 2001
From: Huang Ying
Date: Tue, 22 Mar 2022 14:46:20 -0700
Subject: NUMA Balancing: add page promotion counter

Patch series "NUMA balancing: optimize memory placement for memory tiering system", v13

With the advent of various new memory types, some machines will have
multiple types of memory, e.g. DRAM and PMEM (persistent memory). The
memory subsystem of these machines can be called a memory tiering
system, because the performance of the different types of memory
differs.

After commit c221c0b0308f ("device-dax: "Hotplug" persistent memory for
use like normal RAM"), the PMEM could be used as cost-effective volatile
memory in separate NUMA nodes. In a typical memory tiering system, there
are CPUs, DRAM and PMEM in each physical NUMA node. The CPUs and the
DRAM will be put in one logical node, while the PMEM will be put in
another (faked) logical node.

To optimize the system overall performance, the hot pages should be
placed in the DRAM node. To do that, we need to identify the hot pages
in the PMEM node and migrate them to the DRAM node via NUMA migration.

In the original NUMA balancing, there is already a set of existing
mechanisms to identify the pages recently accessed by the CPUs in a node
and migrate the pages to the node.
So we can reuse these mechanisms to build a mechanism that optimizes
page placement in the memory tiering system. This is implemented in this
patchset.

On the other hand, the cold pages should be placed in the PMEM node. So,
we also need to identify the cold pages in the DRAM node and migrate
them to the PMEM node. In commit 26aa2d199d6f ("mm/migrate: demote pages
during reclaim"), a mechanism to demote the cold DRAM pages to the PMEM
node under memory pressure is implemented. Based on that, the cold DRAM
pages can be demoted to the PMEM node proactively to free some memory
space on the DRAM node to accommodate the promoted hot PMEM pages. This
is implemented in this patchset too.

We have tested the solution with the pmbench memory accessing benchmark
with the 80:20 read/write ratio and the Gauss access address
distribution on a 2 socket Intel server with Optane DC Persistent Memory
Model. The test results show that the pmbench score can improve up to
95.9%.

This patch (of 3):

In a system with multiple memory types, e.g. DRAM and PMEM, the CPU and
DRAM in one socket will be put in one NUMA node as before, while the
PMEM will be put in another NUMA node, as described in the description
of commit c221c0b0308f ("device-dax: "Hotplug" persistent memory for use
like normal RAM"). So, the NUMA balancing mechanism will identify all
PMEM accesses as remote accesses and try to promote the PMEM pages to
DRAM.

To distinguish the number of inter-type promoted pages from that of
inter-socket migrated pages, a new vmstat counter is added. The counter
is per-node (counted in the target node), so it can be used to identify
promotion imbalance among the NUMA nodes.

Link: https://lkml.kernel.org/r/20220301085329.3210428-1-ying.huang@intel.com
Link: https://lkml.kernel.org/r/20220221084529.1052339-1-ying.huang@intel.com
Link: https://lkml.kernel.org/r/20220221084529.1052339-2-ying.huang@intel.com
Signed-off-by: "Huang, Ying"
Reviewed-by: Yang Shi
Tested-by: Baolin Wang
Reviewed-by: Baolin Wang
Acked-by: Johannes Weiner
Reviewed-by: Oscar Salvador
Cc: Michal Hocko
Cc: Rik van Riel
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Dave Hansen
Cc: Zi Yan
Cc: Wei Xu
Cc: Shakeel Butt
Cc: zhongjiang-ali
Cc: Feng Tang
Cc: Randy Dunlap
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mmzone.h | 3 +++
 include/linux/node.h   | 5 +++++
 mm/migrate.c           | 13 ++++++++++---
 mm/vmstat.c            | 3 +++
 4 files changed, 21 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c9e6a50109b9..310b6e7ce58a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -221,6 +221,9 @@ enum node_stat_item {
 	NR_PAGETABLE,		/* used for pagetables */
 #ifdef CONFIG_SWAP
 	NR_SWAPCACHE,
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+	PGPROMOTE_SUCCESS,	/* promote successfully */
 #endif
 	NR_VM_NODE_STAT_ITEMS
 };
diff --git a/include/linux/node.h b/include/linux/node.h
index bb21fd631b16..81bbf1c0afd3 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -181,4 +181,9 @@ static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
 
 #define to_node(device) container_of(device, struct node, dev)
 
+static inline bool node_is_toptier(int node)
+{
+	return node_state(node, N_CPU);
+}
+
 #endif /* _LINUX_NODE_H_ */
diff --git a/mm/migrate.c b/mm/migrate.c
index c0d16f050fec..dc4adf979201 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2069,6 +2069,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	pg_data_t *pgdat =
NODE_DATA(node);
 	int isolated;
 	int nr_remaining;
+	unsigned int nr_succeeded;
 	LIST_HEAD(migratepages);
 	new_page_t *new;
 	bool compound;
@@ -2107,7 +2108,8 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	list_add(&page->lru, &migratepages);
 	nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
-				     MIGRATE_ASYNC, MR_NUMA_MISPLACED, NULL);
+				     MIGRATE_ASYNC, MR_NUMA_MISPLACED,
+				     &nr_succeeded);
 	if (nr_remaining) {
 		if (!list_empty(&migratepages)) {
 			list_del(&page->lru);
@@ -2116,8 +2118,13 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 			putback_lru_page(page);
 		}
 		isolated = 0;
-	} else
-		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_pages);
+	}
+	if (nr_succeeded) {
+		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
+		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
+			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
+					    nr_succeeded);
+	}
 	BUG_ON(!list_empty(&migratepages));
 	return isolated;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4057372745d0..846b670dd346 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1242,6 +1242,9 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_SWAP
 	"nr_swapcached",
 #endif
+#ifdef CONFIG_NUMA_BALANCING
+	"pgpromote_success",
+#endif
 
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
--
cgit v1.2.3-71-gd317


From c574bbe917036c8968b984c82c7b13194fe5ce98 Mon Sep 17 00:00:00 2001
From: Huang Ying
Date: Tue, 22 Mar 2022 14:46:23 -0700
Subject: NUMA balancing: optimize page placement for memory tiering system

With the advent of various new memory types, some machines will have
multiple types of memory, e.g. DRAM and PMEM (persistent memory). The
memory subsystem of these machines can be called a memory tiering
system, because the performance of the different types of memory is
usually different.

In such a system, because of changes in the memory access pattern etc.,
some pages in the slow memory may become globally hot. So in this patch,
the NUMA balancing mechanism is enhanced to optimize the page placement
among the different memory types according to hot/cold dynamically.

In a typical memory tiering system, there are CPUs, fast memory and slow
memory in each physical NUMA node. The CPUs and the fast memory will be
put in one logical node (called fast memory node), while the slow memory
will be put in another (faked) logical node (called slow memory node).
That is, the fast memory is regarded as local while the slow memory is
regarded as remote. So it's possible for the recently accessed pages in
the slow memory node to be promoted to the fast memory node via the
existing NUMA balancing mechanism.

The original NUMA balancing mechanism stops migrating pages if the free
memory of the target node falls below the high watermark. This is a
reasonable policy if there's only one memory type. But it makes the
original NUMA balancing mechanism almost useless for optimizing page
placement among different memory types. Details are as follows.

It is common for the working-set size of the workload to be larger than
the size of the fast memory nodes; otherwise, it's unnecessary to use
the slow memory at all. So, there are almost never enough free pages in
the fast memory nodes, which means the globally hot pages in the slow
memory node cannot be promoted to the fast memory node. To solve the
issue, we have two choices:

a. Ignore the free pages watermark check when promoting hot pages from
the slow memory node to the fast memory node.
This will create some memory pressure in the fast memory node and thus trigger memory reclaim, so that the cold pages in the fast memory node will be demoted to the slow memory node. b. Define a new watermark called wmark_promo, which is higher than wmark_high, and have kswapd reclaim pages until the free pages reach that watermark. The scenario is as follows: when we want to promote hot pages from a slow memory to a fast memory, but the fast memory's free pages would drop below the high watermark with such a promotion, we wake up kswapd with the wmark_promo watermark in order to demote cold pages and free up some space. So, the next time we want to promote hot pages, we might have a chance of doing so. Choice "a" may create high memory pressure in the fast memory node. If the memory pressure of the workload is high, the memory pressure may become so high that the memory allocation latency of the workload is affected, e.g. direct reclaim may be triggered. Choice "b" works much better in this respect. If the memory pressure of the workload is high, hot page promotion will stop earlier because its allocation watermark is higher than that of the normal memory allocation. So in this patch, choice "b" is implemented. A new zone watermark (WMARK_PROMO) is added, which is larger than the high watermark and can be controlled via watermark_scale_factor. In addition to the original page placement optimization among sockets, the NUMA balancing mechanism is extended to optimize page placement according to hot/cold among different memory types. So the sysctl user space interface (numa_balancing) is extended in a backward compatible way as follows, so that users can enable/disable this functionality individually. The sysctl is converted from a Boolean value to a bit field. The flags are defined as, - 0: NUMA_BALANCING_DISABLED - 1: NUMA_BALANCING_NORMAL - 2: NUMA_BALANCING_MEMORY_TIERING We have tested the patch with the pmbench memory accessing benchmark with the 80:20 read/write ratio and the Gauss access address distribution on a 2 socket Intel server with Optane DC Persistent Memory Model. The test results show that the pmbench score can improve by up to 95.9%. Thanks to Andrew Morton for helping to fix the documentation format error. Link: https://lkml.kernel.org/r/20220221084529.1052339-3-ying.huang@intel.com Signed-off-by: "Huang, Ying" Tested-by: Baolin Wang Reviewed-by: Baolin Wang Acked-by: Johannes Weiner Reviewed-by: Oscar Salvador Reviewed-by: Yang Shi Cc: Michal Hocko Cc: Rik van Riel Cc: Mel Gorman Cc: Peter Zijlstra Cc: Dave Hansen Cc: Zi Yan Cc: Wei Xu Cc: Shakeel Butt Cc: zhongjiang-ali Cc: Randy Dunlap Cc: Feng Tang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/admin-guide/sysctl/kernel.rst | 29 ++++++++++++++++++++--------- include/linux/mmzone.h | 1 + include/linux/sched/sysctl.h | 10 ++++++++++ kernel/sched/core.c | 21 +++++++++++++++++---- kernel/sysctl.c | 2 +- mm/migrate.c | 16 ++++++++++++++-- mm/page_alloc.c | 3 ++- mm/vmscan.c | 6 +++++- 8 files changed, 70 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index d359bcfadd39..fdfd2b684822 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -595,16 +595,23 @@ Documentation/admin-guide/kernel-parameters.rst). numa_balancing ============== -Enables/disables automatic page fault based NUMA memory -balancing.
Memory is moved automatically to nodes -that access it often. +Enables/disables and configures automatic page fault based NUMA memory +balancing. Memory is moved automatically to nodes that access it often. +The value to set can be the result of ORing the following: -Enables/disables automatic NUMA memory balancing. On NUMA machines, there -is a performance penalty if remote memory is accessed by a CPU. When this -feature is enabled the kernel samples what task thread is accessing memory -by periodically unmapping pages and later trapping a page fault. At the -time of the page fault, it is determined if the data being accessed should -be migrated to a local memory node. += ================================= +0 NUMA_BALANCING_DISABLED +1 NUMA_BALANCING_NORMAL +2 NUMA_BALANCING_MEMORY_TIERING += ================================= + +Or NUMA_BALANCING_NORMAL to optimize page placement among different +NUMA nodes to reduce remote accessing. On NUMA machines, there is a +performance penalty if remote memory is accessed by a CPU. When this +feature is enabled the kernel samples what task thread is accessing +memory by periodically unmapping pages and later trapping a page +fault. At the time of the page fault, it is determined if the data +being accessed should be migrated to a local memory node. The unmapping of pages and trapping faults incur additional overhead that ideally is offset by improved memory locality but there is no universal @@ -615,6 +622,10 @@ faults may be controlled by the `numa_balancing_scan_period_min_ms, numa_balancing_scan_delay_ms, numa_balancing_scan_period_max_ms, numa_balancing_scan_size_mb`_, and numa_balancing_settle_count sysctls. +Or NUMA_BALANCING_MEMORY_TIERING to optimize page placement among +different types of memory (represented as different NUMA nodes) to +place the hot pages in the fast memory. This is implemented based on +unmapping and page fault too. 
numa_balancing_scan_period_min_ms, numa_balancing_scan_delay_ms, numa_balancing_scan_period_max_ms, numa_balancing_scan_size_mb =============================================================================================================================== diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 310b6e7ce58a..962b14d403e8 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -353,6 +353,7 @@ enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, + WMARK_PROMO, NR_WMARK }; diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index c19dd5a2c05c..b5eec8854c5a 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -23,6 +23,16 @@ enum sched_tunable_scaling { SCHED_TUNABLESCALING_END, }; +#define NUMA_BALANCING_DISABLED 0x0 +#define NUMA_BALANCING_NORMAL 0x1 +#define NUMA_BALANCING_MEMORY_TIERING 0x2 + +#ifdef CONFIG_NUMA_BALANCING +extern int sysctl_numa_balancing_mode; +#else +#define sysctl_numa_balancing_mode 0 +#endif + /* * control realtime throttling: * diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9745613d531c..da6a60383645 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4279,7 +4279,9 @@ DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); #ifdef CONFIG_NUMA_BALANCING -void set_numabalancing_state(bool enabled) +int sysctl_numa_balancing_mode; + +static void __set_numabalancing_state(bool enabled) { if (enabled) static_branch_enable(&sched_numa_balancing); @@ -4287,13 +4289,22 @@ void set_numabalancing_state(bool enabled) static_branch_disable(&sched_numa_balancing); } +void set_numabalancing_state(bool enabled) +{ + if (enabled) + sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL; + else + sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED; + __set_numabalancing_state(enabled); +} + #ifdef CONFIG_PROC_SYSCTL int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int err; - int state = static_branch_likely(&sched_numa_balancing); + int state = sysctl_numa_balancing_mode; if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; @@ -4303,8 +4314,10 @@ int sysctl_numa_balancing(struct ctl_table *table, int write, err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); if (err < 0) return err; - if (write) - set_numabalancing_state(state); + if (write) { + sysctl_numa_balancing_mode = state; + __set_numabalancing_state(state); + } return err; } #endif diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 730ab56d9e92..3395b99d59a4 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1696,7 +1696,7 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = sysctl_numa_balancing, .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, + .extra2 = SYSCTL_FOUR, }, #endif /* CONFIG_NUMA_BALANCING */ { diff --git a/mm/migrate.c b/mm/migrate.c index dc4adf979201..78b2cf87946d 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -51,6 +51,7 @@ #include #include #include +#include #include @@ -2031,16 +2032,27 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) { int page_lru; int nr_pages = thp_nr_pages(page); + int order = compound_order(page); - VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); + VM_BUG_ON_PAGE(order && !PageTransHuge(page), page); /* Do not migrate THP mapped by multiple processes */ if (PageTransHuge(page) && total_mapcount(page) > 1) return 0; /* Avoid migrating to a node that is nearly full */ - if (!migrate_balanced_pgdat(pgdat, nr_pages)) + if 
(!migrate_balanced_pgdat(pgdat, nr_pages)) { + int z; + + if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)) + return 0; + for (z = pgdat->nr_zones - 1; z >= 0; z--) { + if (populated_zone(pgdat->node_zones + z)) + break; + } + wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE); return 0; + } if (isolate_lru_page(page)) return 0; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a573aa9f5160..8b18a077c409 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8441,7 +8441,8 @@ static void __setup_per_zone_wmarks(void) zone->watermark_boost = 0; zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; - zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; + zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; + zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; spin_unlock_irqrestore(&zone->lock, flags); } diff --git a/mm/vmscan.c b/mm/vmscan.c index f5ec53f19f3b..499fa86e754a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -56,6 +56,7 @@ #include #include +#include #include "internal.h" @@ -3895,7 +3896,10 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) if (!managed_zone(zone)) continue; - mark = high_wmark_pages(zone); + if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) + mark = wmark_pages(zone, WMARK_PROMO); + else + mark = high_wmark_pages(zone); if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) return true; } -- cgit v1.2.3-71-gd317 From 4d45c3aff5ebf80d329eba0f90544d20224f612d Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 22 Mar 2022 14:46:33 -0700 Subject: mm/vmstat: add event for ksm swapping in copy When a page that used to be a KSM page is faulted in from swap, and that page had been swapped in before, the system has to make a copy and leave remerging the pages to a later pass of ksmd. That is not good for performance; we'd better reduce this kind of copying. There are some ways to reduce it, for example lessening swappiness or applying madvise(, , MADV_MERGEABLE) to the range (see the sketch below). So add this event to support doing such tuning. Just like this patch: "mm, THP, swap: add THP swapping out fallback counting".
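As a concrete illustration of the MADV_MERGEABLE tuning mentioned above, here is a minimal user-space sketch; the region size and the zero-fill access pattern are arbitrary examples, not part of the patch:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 1024 * 1024;	/* hypothetical 64 MiB region */
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Ask KSM to consider this range for merging; pages merged here
	 * are the ones that may later need the swap-in copy counted by
	 * the new ksm_swpin_copy event. */
	if (madvise(addr, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	memset(addr, 0, len);	/* identical pages are merge candidates */
	munmap(addr, len);
	return 0;
}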
Link: https://lkml.kernel.org/r/20220113023839.758845-1-yang.yang29@zte.com.cn Signed-off-by: Yang Yang Reviewed-by: Ran Xiaokai Cc: Hugh Dickins Cc: Yang Shi Cc: Dave Hansen Cc: Saravanan D Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vm_event_item.h | 3 +++ mm/ksm.c | 3 +++ mm/vmstat.c | 3 +++ 3 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 7b2363388bfa..16a0a4fd000b 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -129,6 +129,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_SWAP SWAP_RA, SWAP_RA_HIT, +#ifdef CONFIG_KSM + KSM_SWPIN_COPY, +#endif #endif #ifdef CONFIG_X86 DIRECT_MAP_LEVEL2_SPLIT, diff --git a/mm/ksm.c b/mm/ksm.c index c20bd4d9a0d9..4a7f8614e57d 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2595,6 +2595,9 @@ struct page *ksm_might_need_to_copy(struct page *page, SetPageDirty(new_page); __SetPageUptodate(new_page); __SetPageLocked(new_page); +#ifdef CONFIG_SWAP + count_vm_event(KSM_SWPIN_COPY); +#endif } return new_page; diff --git a/mm/vmstat.c b/mm/vmstat.c index 846b670dd346..d5cc8d739fac 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1388,6 +1388,9 @@ const char * const vmstat_text[] = { #ifdef CONFIG_SWAP "swap_ra", "swap_ra_hit", +#ifdef CONFIG_KSM + "ksm_swpin_copy", +#endif #endif #ifdef CONFIG_X86 "direct_map_level2_splits", -- cgit v1.2.3-71-gd317 From e930d999715073a70d306fb59a394ea8b84d0b45 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 22 Mar 2022 14:46:51 -0700 Subject: mm, memory_hotplug: make arch_alloc_nodedata independent on CONFIG_MEMORY_HOTPLUG Patch series "mm, memory_hotplug: handle uninitialized numa node gracefully". The core of the fix is patch 2, which also links existing bug reports. The high level goal is to have all possible numa nodes have their pgdat allocated and initialized, so for_each_possible_node(nid) NODE_DATA(nid) will never return garbage. This has proven to be a problem in several places when an offline numa node is used for an allocation, just to realize that node_data and therefore allocation fallback zonelists are not initialized, and such an allocation request blows up. There were attempts to address that by checking node_online in several places, including the page allocator. This patchset approaches the problem from a different perspective: instead of special casing, which just adds a runtime overhead, it allocates pglist_data for each possible node. This can add some memory overhead for platforms with a high number of possible nodes if they do not contain any memory. This should be a rather rare configuration though. How to test this? David has provided an excellent howto: http://lkml.kernel.org/r/6e5ebc19-890c-b6dd-1924-9f25c441010d@redhat.com Patches 1 and 3-6 are mostly cleanups. The patchset has been reviewed by Rafael (thanks!) and the core fix tested by Rafael and Alexey (thanks to both). David has tested as per instructions above and hasn't found any fallout in the memory hotplug scenarios. This patch (of 6): This is a preparatory patch and it doesn't introduce any functional change. It merely pulls arch_alloc_nodedata (and co) out of CONFIG_MEMORY_HOTPLUG, because the following patch will need to call this from the generic MM code.
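To make the failure mode above concrete, a minimal sketch of the problematic pattern, purely illustrative; 'nid' stands for any hypothetical node id that is possible but was never initialized:

	/* nid: node_possible(nid) but NODE_DATA(nid) never allocated */
	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
	/* Before this series, the allocator dereferences NODE_DATA(nid)
	 * to pick a zonelist; with an unallocated pgdat that is garbage
	 * and the allocation request blows up. */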
Link: https://lkml.kernel.org/r/20220127085305.20890-1-mhocko@kernel.org Link: https://lkml.kernel.org/r/20220127085305.20890-2-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Rafael Aquini Acked-by: David Hildenbrand Acked-by: Mike Rapoport Reviewed-by: Oscar Salvador Reviewed-by: Wei Yang Cc: Alexey Makhalov Cc: Christoph Lameter Cc: Dennis Zhou Cc: Eric Dumazet Cc: Nico Pache Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/mm/discontig.c | 2 - include/linux/memory_hotplug.h | 119 ++++++++++++++++++++--------------------- 2 files changed, 59 insertions(+), 62 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 791d4176e4a6..8dc8a554f774 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -608,7 +608,6 @@ void __init paging_init(void) zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); } -#ifdef CONFIG_MEMORY_HOTPLUG pg_data_t *arch_alloc_nodedata(int nid) { unsigned long size = compute_pernodesize(nid); @@ -626,7 +625,6 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) pgdat_list[update_node] = update_pgdat; scatter_node_data(); } -#endif #ifdef CONFIG_SPARSEMEM_VMEMMAP int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index be48e003a518..4355983b364d 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -16,6 +16,65 @@ struct memory_group; struct resource; struct vmem_altmap; +#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION +/* + * For supporting node-hotadd, we have to allocate a new pgdat. + * + * If an arch has generic style NODE_DATA(), + * node_data[nid] = kzalloc() works well. But it depends on the architecture. + * + * In general, generic_alloc_nodedata() is used. + * Now, arch_free_nodedata() is just defined for error path of node_hot_add. + * + */ +extern pg_data_t *arch_alloc_nodedata(int nid); +extern void arch_free_nodedata(pg_data_t *pgdat); +extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); + +#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + +#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) +#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) + +#ifdef CONFIG_NUMA +/* + * XXX: node aware allocation can't work well to get new node's memory at this time. + * Because, pgdat for the new node is not allocated/initialized yet itself. + * To use new node's memory, more consideration will be necessary. + */ +#define generic_alloc_nodedata(nid) \ +({ \ + kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ +}) +/* + * This definition is just for error path in node hotadd. + * For node hotremove, we have to replace this. 
+ */ +#define generic_free_nodedata(pgdat) kfree(pgdat) + +extern pg_data_t *node_data[]; +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ + node_data[nid] = pgdat; +} + +#else /* !CONFIG_NUMA */ + +/* never called */ +static inline pg_data_t *generic_alloc_nodedata(int nid) +{ + BUG(); + return NULL; +} +static inline void generic_free_nodedata(pg_data_t *pgdat) +{ +} +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ +} +#endif /* CONFIG_NUMA */ +#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + #ifdef CONFIG_MEMORY_HOTPLUG struct page *pfn_to_online_page(unsigned long pfn); @@ -154,66 +213,6 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_params *params); #endif /* ARCH_HAS_ADD_PAGES */ -#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION -/* - * For supporting node-hotadd, we have to allocate a new pgdat. - * - * If an arch has generic style NODE_DATA(), - * node_data[nid] = kzalloc() works well. But it depends on the architecture. - * - * In general, generic_alloc_nodedata() is used. - * Now, arch_free_nodedata() is just defined for error path of node_hot_add. - * - */ -extern pg_data_t *arch_alloc_nodedata(int nid); -extern void arch_free_nodedata(pg_data_t *pgdat); -extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); - -#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ - -#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) -#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) - -#ifdef CONFIG_NUMA -/* - * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat. - * XXX: kmalloc_node() can't work well to get new node's memory at this time. - * Because, pgdat for the new node is not allocated/initialized yet itself. - * To use new node's memory, more consideration will be necessary. - */ -#define generic_alloc_nodedata(nid) \ -({ \ - kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ -}) -/* - * This definition is just for error path in node hotadd. - * For node hotremove, we have to replace this. - */ -#define generic_free_nodedata(pgdat) kfree(pgdat) - -extern pg_data_t *node_data[]; -static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) -{ - node_data[nid] = pgdat; -} - -#else /* !CONFIG_NUMA */ - -/* never called */ -static inline pg_data_t *generic_alloc_nodedata(int nid) -{ - BUG(); - return NULL; -} -static inline void generic_free_nodedata(pg_data_t *pgdat) -{ -} -static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) -{ -} -#endif /* CONFIG_NUMA */ -#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ - void get_online_mems(void); void put_online_mems(void); -- cgit v1.2.3-71-gd317 From 09f49dca570a917a8c6bccd7e8c61f5141534e3a Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 22 Mar 2022 14:46:54 -0700 Subject: mm: handle uninitialized numa nodes gracefully We have had several reports [1][2][3] that the page allocator blows up when an allocation from a possible node is requested. The underlying reason is that NODE_DATA for the specific node is not allocated. NUMA-specific initialization is arch-specific and it can vary a lot. E.g. x86 tries to initialize all nodes that have some cpu affinity (see init_cpu_to_node), but this can be insufficient because the node might be cpuless, for example. One way to address this problem would be to check for !node_online nodes when trying to get a zonelist and silently fall back to another node. That unfortunately adds a branch to the allocator hot path, and it doesn't handle any other potential NODE_DATA users.
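For contrast, a sketch of that rejected workaround; this is hypothetical code and not part of any patch in the series:

	/* Hypothetical check on the zonelist lookup path: */
	if (unlikely(!node_online(nid)))
		nid = numa_mem_id();	/* silently fall back to a node with memory */
	/* Costs a branch on every allocation and still leaves other
	 * NODE_DATA(nid) users unprotected. */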
This patch takes a different approach (following a lead of [3]) and preallocates pgdat for all possible nodes in arch-independent code - free_area_init. All uninitialized nodes are treated as memoryless nodes. The node_state of the node is not changed, because that would lead to other side effects - e.g. a sysfs representation of such a node - and from past discussions [4] it is known that some tools might have problems digesting that. A newly allocated pgdat gets only minimal initialization, and the rest of the work is expected to be done by memory hotplug - hotadd_new_pgdat (renamed to hotadd_init_pgdat). generic_alloc_nodedata is changed to use the memblock allocator, because neither the page nor the slab allocator is available at the stage when all pgdats are allocated. Hotplug doesn't allocate pgdat anymore, so we can use the early boot allocator. The only arch-specific implementation is ia64, and that is changed to use the early allocator as well. [1] http://lkml.kernel.org/r/20211101201312.11589-1-amakhalov@vmware.com [2] http://lkml.kernel.org/r/20211207224013.880775-1-npache@redhat.com [3] http://lkml.kernel.org/r/20190114082416.30939-1-mhocko@kernel.org [4] http://lkml.kernel.org/r/20200428093836.27190-1-srikar@linux.vnet.ibm.com [akpm@linux-foundation.org: replace comment, per Mike] Link: https://lkml.kernel.org/r/Yfe7RBeLCijnWBON@dhcp22.suse.cz Reported-by: Alexey Makhalov Tested-by: Alexey Makhalov Reported-by: Nico Pache Acked-by: Rafael Aquini Tested-by: Rafael Aquini Acked-by: David Hildenbrand Reviewed-by: Oscar Salvador Acked-by: Mike Rapoport Signed-off-by: Michal Hocko Cc: Christoph Lameter Cc: Dennis Zhou Cc: Eric Dumazet Cc: Tejun Heo Cc: Wei Yang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/mm/discontig.c | 4 ++-- include/linux/memory_hotplug.h | 2 +- mm/internal.h | 2 ++ mm/memory_hotplug.c | 21 +++++++++------------ mm/page_alloc.c | 40 ++++++++++++++++++++++++++++++++++++---- 5 files changed, 50 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 8dc8a554f774..dd0cf4834eaa 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -608,11 +608,11 @@ void __init paging_init(void) zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); } -pg_data_t *arch_alloc_nodedata(int nid) +pg_data_t * __init arch_alloc_nodedata(int nid) { unsigned long size = compute_pernodesize(nid); - return kzalloc(size, GFP_KERNEL); + return memblock_alloc(size, SMP_CACHE_BYTES); } void arch_free_nodedata(pg_data_t *pgdat) diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 4355983b364d..cdd66bfdf855 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -44,7 +44,7 @@ extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); */ #define generic_alloc_nodedata(nid) \ ({ \ - kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ + memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \ }) /* * This definition is just for error path in node hotadd.
diff --git a/mm/internal.h b/mm/internal.h index 9c298afb9688..f1554a4e249e 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -707,4 +707,6 @@ void vunmap_range_noflush(unsigned long start, unsigned long end); int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags); +DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); + #endif /* __MM_INTERNAL_H */ diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 0139b77c51d5..11f39d0e76ec 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1162,19 +1162,21 @@ static void reset_node_present_pages(pg_data_t *pgdat) } /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ -static pg_data_t __ref *hotadd_new_pgdat(int nid) +static pg_data_t __ref *hotadd_init_pgdat(int nid) { struct pglist_data *pgdat; pgdat = NODE_DATA(nid); - if (!pgdat) { - pgdat = arch_alloc_nodedata(nid); - if (!pgdat) - return NULL; + /* + * NODE_DATA is preallocated (free_area_init) but its internal + * state is not allocated completely. Add missing pieces. + * Completely offline nodes stay around and they just need + * reintialization. + */ + if (pgdat->per_cpu_nodestats == &boot_nodestats) { pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); - arch_refresh_nodedata(nid, pgdat); } else { int cpu; /* @@ -1193,8 +1195,6 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid) } } - /* we can use NODE_DATA(nid) from here */ - pgdat->node_id = nid; pgdat->node_start_pfn = 0; /* init node's zones as empty zones, we don't have any present pages.*/ @@ -1246,7 +1246,7 @@ static int __try_online_node(int nid, bool set_node_online) if (node_online(nid)) return 0; - pgdat = hotadd_new_pgdat(nid); + pgdat = hotadd_init_pgdat(nid); if (!pgdat) { pr_err("Cannot online node %d due to NULL pgdat\n", nid); ret = -ENOMEM; @@ -1445,9 +1445,6 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) return ret; error: - /* rollback pgdat allocation and others */ - if (new_node) - rollback_node_hotadd(nid); if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) memblock_remove(start, size); error_mem_hotplug_end: diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8b18a077c409..4f141a4e5b64 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6341,7 +6341,7 @@ static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonesta #define BOOT_PAGESET_BATCH 1 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); -static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); +DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); static void __build_all_zonelists(void *data) { @@ -6363,7 +6363,11 @@ static void __build_all_zonelists(void *data) if (self && !node_online(self->node_id)) { build_zonelists(self); } else { - for_each_online_node(nid) { + /* + * All possible nodes have pgdat preallocated + * in free_area_init + */ + for_each_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); build_zonelists(pgdat); @@ -8063,8 +8067,36 @@ void __init free_area_init(unsigned long *max_zone_pfn) /* Initialise every node */ mminit_verify_pageflags_layout(); setup_nr_node_ids(); - for_each_online_node(nid) { - pg_data_t *pgdat = NODE_DATA(nid); + for_each_node(nid) { + pg_data_t *pgdat; + + if (!node_online(nid)) { + pr_info("Initializing node %d as memoryless\n", nid); + + /* Allocator not initialized yet */ + pgdat = arch_alloc_nodedata(nid); + if (!pgdat) { + pr_err("Cannot allocate %zuB for node %d.\n", 
+ sizeof(*pgdat), nid); + continue; + } + arch_refresh_nodedata(nid, pgdat); + free_area_init_memoryless_node(nid); + + /* + * We do not want to confuse userspace by sysfs + * files/directories for node without any memory + * attached to it, so this node is not marked as + * N_MEMORY and not marked online so that no sysfs + * hierarchy will be created via register_one_node for + * it. The pgdat will get fully initialized by + * hotadd_init_pgdat() when memory is hotplugged into + * this node. + */ + continue; + } + + pgdat = NODE_DATA(nid); free_area_init_node(nid); /* Any memory on that node */ -- cgit v1.2.3-71-gd317 From 390511e1476eb1cc41d420a7661b33f4d8584c3f Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 22 Mar 2022 14:46:57 -0700 Subject: mm, memory_hotplug: drop arch_free_nodedata Prior to "mm: handle uninitialized numa nodes gracefully", memory hotplug used to allocate the pgdat when memory was added to a node (hotadd_init_pgdat). arch_free_nodedata was only used in the failure path, because once the pgdat is exported (made visible via NODE_DATA(nid)) it cannot really be freed - there is no synchronization available for that. pgdat is now allocated for each possible node, so memory hotplug never needs to use arch_free_nodedata; drop it. This patch doesn't introduce any functional change. Link: https://lkml.kernel.org/r/20220127085305.20890-4-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Rafael Aquini Acked-by: David Hildenbrand Acked-by: Mike Rapoport Reviewed-by: Oscar Salvador Cc: Alexey Makhalov Cc: Christoph Lameter Cc: Dennis Zhou Cc: Eric Dumazet Cc: Nico Pache Cc: Tejun Heo Cc: Wei Yang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/mm/discontig.c | 5 ----- include/linux/memory_hotplug.h | 3 --- mm/memory_hotplug.c | 10 ---------- 3 files changed, 18 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index dd0cf4834eaa..73d0db36edb6 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -615,11 +615,6 @@ pg_data_t * __init arch_alloc_nodedata(int nid) return memblock_alloc(size, SMP_CACHE_BYTES); } -void arch_free_nodedata(pg_data_t *pgdat) -{ - kfree(pgdat); -} - void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) { pgdat_list[update_node] = update_pgdat; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index cdd66bfdf855..60f09d3ebb3d 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -24,17 +24,14 @@ struct vmem_altmap; * node_data[nid] = kzalloc() works well. But it depends on the architecture. * * In general, generic_alloc_nodedata() is used. - * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
* */ extern pg_data_t *arch_alloc_nodedata(int nid); -extern void arch_free_nodedata(pg_data_t *pgdat); extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); #else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ #define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) -#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) #ifdef CONFIG_NUMA /* diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 11f39d0e76ec..55c3e5309088 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1217,16 +1217,6 @@ static pg_data_t __ref *hotadd_init_pgdat(int nid) return pgdat; } -static void rollback_node_hotadd(int nid) -{ - pg_data_t *pgdat = NODE_DATA(nid); - - arch_refresh_nodedata(nid, NULL); - free_percpu(pgdat->per_cpu_nodestats); - arch_free_nodedata(pgdat); -} - - /* * __try_online_node - online a node if offlined * @nid: the node ID -- cgit v1.2.3-71-gd317 From 70b5b46a754245d383811b4d2f2c76c34bb7e145 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 22 Mar 2022 14:47:00 -0700 Subject: mm, memory_hotplug: reorganize new pgdat initialization When a !node_online node is brought up, it needs hotplug-specific initialization, because the node could either be not yet initialized or have been recycled after a previous hotremove. hotadd_init_pgdat is responsible for that. Internal pgdat state is currently initialized in two places - hotadd_init_pgdat and free_area_init_core_hotplug. There is no real clear cut on what should go where, but this patch chooses to move the whole internal state initialization into free_area_init_core_hotplug. hotadd_init_pgdat is still responsible for pulling all the parts together - most notably for initializing zonelists, because those depend on the overall topology. This patch doesn't introduce any functional change. Link: https://lkml.kernel.org/r/20220127085305.20890-5-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Rafael Aquini Acked-by: David Hildenbrand Reviewed-by: Oscar Salvador Cc: Alexey Makhalov Cc: Christoph Lameter Cc: Dennis Zhou Cc: Eric Dumazet Cc: Mike Rapoport Cc: Nico Pache Cc: Tejun Heo Cc: Wei Yang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memory_hotplug.h | 2 +- mm/memory_hotplug.c | 28 +++------------------------- mm/page_alloc.c | 25 +++++++++++++++++++++++-- 3 files changed, 27 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 60f09d3ebb3d..76bf2de86def 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -319,7 +319,7 @@ extern void set_zone_contiguous(struct zone *zone); extern void clear_zone_contiguous(struct zone *zone); #ifdef CONFIG_MEMORY_HOTPLUG -extern void __ref free_area_init_core_hotplug(int nid); +extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat); extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); extern int add_memory_resource(int nid, struct resource *resource, diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 55c3e5309088..a4f69d399929 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1166,39 +1166,16 @@ static pg_data_t __ref *hotadd_init_pgdat(int nid) { struct pglist_data *pgdat; - pgdat = NODE_DATA(nid); - /* * NODE_DATA is preallocated (free_area_init) but its internal * state is not allocated completely. Add missing pieces. * Completely offline nodes stay around and they just need * reintialization.
*/ - if (pgdat->per_cpu_nodestats == &boot_nodestats) { - pgdat->per_cpu_nodestats = - alloc_percpu(struct per_cpu_nodestat); - } else { - int cpu; - /* - * Reset the nr_zones, order and highest_zoneidx before reuse. - * Note that kswapd will init kswapd_highest_zoneidx properly - * when it starts in the near future. - */ - pgdat->nr_zones = 0; - pgdat->kswapd_order = 0; - pgdat->kswapd_highest_zoneidx = 0; - for_each_online_cpu(cpu) { - struct per_cpu_nodestat *p; - - p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); - memset(p, 0, sizeof(*p)); - } - } - - pgdat->node_start_pfn = 0; + pgdat = NODE_DATA(nid); /* init node's zones as empty zones, we don't have any present pages.*/ - free_area_init_core_hotplug(nid); + free_area_init_core_hotplug(pgdat); /* * The node we allocated has no zone fallback lists. For avoiding @@ -1210,6 +1187,7 @@ static pg_data_t __ref *hotadd_init_pgdat(int nid) * When memory is hot-added, all the memory is in offline state. So * clear all zones' present_pages because they will be updated in * online_pages() and offline_pages(). + * TODO: should be in free_area_init_core_hotplug? */ reset_node_managed_pages(pgdat); reset_node_present_pages(pgdat); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4f141a4e5b64..d32a635f2c72 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7466,12 +7466,33 @@ static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, * NOTE: this function is only called during memory hotplug */ #ifdef CONFIG_MEMORY_HOTPLUG -void __ref free_area_init_core_hotplug(int nid) +void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) { + int nid = pgdat->node_id; enum zone_type z; - pg_data_t *pgdat = NODE_DATA(nid); + int cpu; pgdat_init_internals(pgdat); + + if (pgdat->per_cpu_nodestats == &boot_nodestats) + pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); + + /* + * Reset the nr_zones, order and highest_zoneidx before reuse. + * Note that kswapd will init kswapd_highest_zoneidx properly + * when it starts in the near future. + */ + pgdat->nr_zones = 0; + pgdat->kswapd_order = 0; + pgdat->kswapd_highest_zoneidx = 0; + pgdat->node_start_pfn = 0; + for_each_online_cpu(cpu) { + struct per_cpu_nodestat *p; + + p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); + memset(p, 0, sizeof(*p)); + } + for (z = 0; z < MAX_NR_ZONES; z++) zone_init_internals(&pgdat->node_zones[z], z, nid, 0); } -- cgit v1.2.3-71-gd317 From 2848a28b0a6052a4c8450397d2647d7d8e3f6f06 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 22 Mar 2022 14:47:13 -0700 Subject: drivers/base/node: consolidate node device subsystem initialization in node_dev_init() ... and call node_dev_init() after memory_dev_init() from driver_init(), so before any of the existing arch/subsys calls. All online nodes should be known at that point: early during boot, arch code determines node and zone ranges and sets the relevant nodes online; usually this happens in setup_arch(). This is in line with memory_dev_init(), which initializes the memory device subsystem and creates all memory block devices. Similar to memory_dev_init(), panic() if anything goes wrong, we don't want to continue with such basic initialization errors. The important part is that node_dev_init() gets called after memory_dev_init() and after cpu_dev_init(), but before any of the relevant archs call register_cpu() to register the new cpu device under the node device. The latter should be the case for the current users of topology_init(). 
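The resulting ordering can be summarized as follows; this is a sketch condensed from the drivers/base/init.c hunk below, not new code:

	/* driver_init(), relevant calls in order: */
	cpu_dev_init();		/* cpu devices exist first */
	memory_dev_init();	/* then all memory block devices */
	node_dev_init();	/* then node devices, linking memory blocks */
	container_dev_init();
	/* arch code calls register_cpu() later, e.g. from topology_init(),
	 * once the node devices are in place. */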
Link: https://lkml.kernel.org/r/20220203105212.30385-1-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Oscar Salvador Tested-by: Anatoly Pugachev (sparc64) Cc: Greg Kroah-Hartman Cc: Michal Hocko Cc: Oscar Salvador Cc: Mike Rapoport Cc: Catalin Marinas Cc: Will Deacon Cc: Thomas Bogendoerfer Cc: Michael Ellerman Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Albert Ou Cc: Heiko Carstens Cc: Vasily Gorbik Cc: Yoshinori Sato Cc: Rich Felker Cc: "David S. Miller" Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "Rafael J. Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/kernel/setup.c | 3 --- arch/ia64/kernel/topology.c | 10 ---------- arch/mips/kernel/topology.c | 5 ----- arch/powerpc/kernel/sysfs.c | 17 ----------------- arch/riscv/kernel/setup.c | 3 --- arch/s390/kernel/numa.c | 7 ------- arch/sh/kernel/topology.c | 5 ----- arch/sparc/kernel/sysfs.c | 12 ------------ arch/x86/kernel/topology.c | 5 ----- drivers/base/init.c | 1 + drivers/base/node.c | 30 +++++++++++++++++------------- include/linux/node.h | 4 ++++ 12 files changed, 22 insertions(+), 80 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f70573928f1b..3505789cf4bd 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -406,9 +406,6 @@ static int __init topology_init(void) { int i; - for_each_online_node(i) - register_one_node(i); - for_each_possible_cpu(i) { struct cpu *cpu = &per_cpu(cpu_data.cpu, i); cpu->hotpluggable = cpu_can_disable(i); diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index e4992917a24b..94a848b06f15 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -70,16 +70,6 @@ static int __init topology_init(void) { int i, err = 0; -#ifdef CONFIG_NUMA - /* - * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes? - */ - for_each_online_node(i) { - if ((err = register_one_node(i))) - goto out; - } -#endif - sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL); if (!sysfs_cpus) panic("kzalloc in topology_init failed - NR_CPUS too big?"); diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c index 08ad6371fbe0..9429d85a4703 100644 --- a/arch/mips/kernel/topology.c +++ b/arch/mips/kernel/topology.c @@ -12,11 +12,6 @@ static int __init topology_init(void) { int i, ret; -#ifdef CONFIG_NUMA - for_each_online_node(i) - register_one_node(i); -#endif /* CONFIG_NUMA */ - for_each_present_cpu(i) { struct cpu *c = &per_cpu(cpu_devices, i); diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index d45a415d5374..2069bbb90a9a 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -1110,14 +1110,6 @@ EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group); /* NUMA stuff */ #ifdef CONFIG_NUMA -static void __init register_nodes(void) -{ - int i; - - for (i = 0; i < MAX_NUMNODES; i++) - register_one_node(i); -} - int sysfs_add_device_to_node(struct device *dev, int nid) { struct node *node = node_devices[nid]; @@ -1132,13 +1124,6 @@ void sysfs_remove_device_from_node(struct device *dev, int nid) sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj)); } EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node); - -#else -static void __init register_nodes(void) -{ - return; -} - #endif /* Only valid if CPU is present. 
*/ @@ -1155,8 +1140,6 @@ static int __init topology_init(void) { int cpu, r; - register_nodes(); - for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index b42bfdc67482..834eb652a7b9 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -301,9 +301,6 @@ static int __init topology_init(void) { int i, ret; - for_each_online_node(i) - register_one_node(i); - for_each_possible_cpu(i) { struct cpu *cpu = &per_cpu(cpu_devices, i); diff --git a/arch/s390/kernel/numa.c b/arch/s390/kernel/numa.c index 51c5a9f6e525..23ab9f02f278 100644 --- a/arch/s390/kernel/numa.c +++ b/arch/s390/kernel/numa.c @@ -33,10 +33,3 @@ void __init numa_setup(void) NODE_DATA(0)->node_spanned_pages = memblock_end_of_DRAM() >> PAGE_SHIFT; NODE_DATA(0)->node_id = 0; } - -static int __init numa_init_late(void) -{ - register_one_node(0); - return 0; -} -arch_initcall(numa_init_late); diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c index 76af6db9daa2..2d2a7509b565 100644 --- a/arch/sh/kernel/topology.c +++ b/arch/sh/kernel/topology.c @@ -46,11 +46,6 @@ static int __init topology_init(void) { int i, ret; -#ifdef CONFIG_NUMA - for_each_online_node(i) - register_one_node(i); -#endif - for_each_present_cpu(i) { struct cpu *c = &per_cpu(cpu_devices, i); diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index 6d60d416f0dd..f19487e4cc71 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c @@ -244,22 +244,10 @@ static void __init check_mmu_stats(void) mmu_stats_supported = 1; } -static void register_nodes(void) -{ -#ifdef CONFIG_NUMA - int i; - - for (i = 0; i < MAX_NUMNODES; i++) - register_one_node(i); -#endif -} - static int __init topology_init(void) { int cpu, ret; - register_nodes(); - check_mmu_stats(); for_each_possible_cpu(cpu) { diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index bd83748e2bde..8617d1ed9d31 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c @@ -154,11 +154,6 @@ static int __init topology_init(void) { int i; -#ifdef CONFIG_NUMA - for_each_online_node(i) - register_one_node(i); -#endif - for_each_present_cpu(i) arch_register_cpu(i); diff --git a/drivers/base/init.c b/drivers/base/init.c index a9f57c22fb9e..d8d0fe687111 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -35,5 +35,6 @@ void __init driver_init(void) auxiliary_bus_init(); cpu_dev_init(); memory_dev_init(); + node_dev_init(); container_dev_init(); } diff --git a/drivers/base/node.c b/drivers/base/node.c index 87acc47e8951..a133981a12fc 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -1065,26 +1065,30 @@ static const struct attribute_group *cpu_root_attr_groups[] = { }; #define NODE_CALLBACK_PRI 2 /* lower than SLAB */ -static int __init register_node_type(void) +void __init node_dev_init(void) { - int ret; + static struct notifier_block node_memory_callback_nb = { + .notifier_call = node_memory_callback, + .priority = NODE_CALLBACK_PRI, + }; + int ret, i; BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES); BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES); ret = subsys_system_register(&node_subsys, cpu_root_attr_groups); - if (!ret) { - static struct notifier_block node_memory_callback_nb = { - .notifier_call = node_memory_callback, - .priority = NODE_CALLBACK_PRI, - }; - register_hotmemory_notifier(&node_memory_callback_nb); - } + if (ret) + panic("%s() failed to register subsystem: %d\n", __func__, 
ret); + + register_hotmemory_notifier(&node_memory_callback_nb); /* - * Note: we're not going to unregister the node class if we fail - * to register the node state class attribute files. + * Create all node devices, which will properly link the node + * to applicable memory block devices and already created cpu devices. */ - return ret; + for_each_online_node(i) { + ret = register_one_node(i); + if (ret) + panic("%s() failed to add node: %d\n", __func__, ret); + } } -postcore_initcall(register_node_type); diff --git a/include/linux/node.h b/include/linux/node.h index 81bbf1c0afd3..7f876d48af11 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -112,6 +112,7 @@ static inline void link_mem_sections(int nid, unsigned long start_pfn, extern void unregister_node(struct node *node); #ifdef CONFIG_NUMA +extern void node_dev_init(void); /* Core of the node registration - only memory hotplug should use this */ extern int __register_one_node(int nid); @@ -149,6 +150,9 @@ extern void register_hugetlbfs_with_node(node_registration_func_t doregister, node_registration_func_t unregister); #endif #else +static inline void node_dev_init(void) +{ +} static inline int __register_one_node(int nid) { return 0; -- cgit v1.2.3-71-gd317 From cc6515591b25f08ce199e9379844a964f52a27f2 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 22 Mar 2022 14:47:28 -0700 Subject: drivers/base/node: rename link_mem_sections() to register_memory_block_under_node() Patch series "drivers/base/memory: determine and store zone for single-zone memory blocks", v2. I remember talking to Michal in the past about removing test_pages_in_a_zone(), which we use for: * verifying that a memory block we intend to offline is really only managed by a single zone. We don't support offlining of memory blocks that are managed by multiple zones (e.g., multiple nodes, DMA and DMA32) * exposing that zone to user space via /sys/devices/system/memory/memory*/valid_zones Now that I identified some more cases where test_pages_in_a_zone() might go wrong, and we received an UBSAN report (see patch #3), let's get rid of this PFN walker. So instead of detecting the zone at runtime with test_pages_in_a_zone() by scanning the memmap, let's determine and remember for each memory block if it's managed by a single zone. The stored zone can then be used for the above two cases, avoiding a manual lookup using test_pages_in_a_zone(). This avoids eventually stumbling over uninitialized memmaps in corner cases, especially when ZONE_DEVICE ranges partly fall into memory blocks (that are responsible for managing System RAM). Handling memory onlining is easy, because we online to exactly one zone. Handling boot memory is more tricky, because we want to avoid scanning all zones of all nodes to detect possible zones that overlap with the physical memory region of interest. Fortunately, we already have code that determines the applicable nodes for a memory block, to create sysfs links -- we'll hook into that. Patch #1 is a simple cleanup I had lying around for a while. Patch #2 contains the main logic to remove test_pages_in_a_zone() and further details. [1] https://lkml.kernel.org/r/20220128144540.153902-1-david@redhat.com [2] https://lkml.kernel.org/r/20220203105212.30385-1-david@redhat.com This patch (of 2): Let's adjust the stale terminology, making it match unregister_memory_block_under_nodes() and do_register_memory_block_under_node(). We're dealing with memory block devices, which span 1..X memory sections.
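For reference, the renamed helper keeps the old signature; a sketch of its two call sites after this patch, taken from the hunks below:

	/* Early boot, from register_one_node(): */
	register_memory_blocks_under_node(nid, start_pfn, end_pfn, MEMINIT_EARLY);
	/* Memory hotplug, from add_memory_resource(): */
	register_memory_blocks_under_node(nid, PFN_DOWN(start),
					  PFN_UP(start + size - 1), MEMINIT_HOTPLUG);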
Link: https://lkml.kernel.org/r/20220210184359.235565-1-david@redhat.com Link: https://lkml.kernel.org/r/20220210184359.235565-2-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Oscar Salvador Cc: Greg Kroah-Hartman Cc: Michal Hocko Cc: "Rafael J. Wysocki" Cc: Rafael Parra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/node.c | 5 +++-- include/linux/node.h | 16 ++++++++-------- mm/memory_hotplug.c | 6 +++--- 3 files changed, 14 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/node.c b/drivers/base/node.c index a133981a12fc..5d75341413ce 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -892,8 +892,9 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk) kobject_name(&node_devices[mem_blk->nid]->dev.kobj)); } -void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, - enum meminit_context context) +void register_memory_blocks_under_node(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context) { walk_memory_blocks_func_t func; diff --git a/include/linux/node.h b/include/linux/node.h index 7f876d48af11..40d641a8bfb0 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -99,13 +99,13 @@ extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); #if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA) -void link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context); +void register_memory_blocks_under_node(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context); #else -static inline void link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context) +static inline void register_memory_blocks_under_node(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context) { } #endif @@ -129,8 +129,8 @@ static inline int register_one_node(int nid) error = __register_one_node(nid); if (error) return error; - /* link memory sections under this node */ - link_mem_sections(nid, start_pfn, end_pfn, MEMINIT_EARLY); + register_memory_blocks_under_node(nid, start_pfn, end_pfn, + MEMINIT_EARLY); } return error; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index ce68098832aa..ed1a5dac6797 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1383,9 +1383,9 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) BUG_ON(ret); } - /* link memory sections under this node.*/ - link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1), - MEMINIT_HOTPLUG); + register_memory_blocks_under_node(nid, PFN_DOWN(start), + PFN_UP(start + size - 1), + MEMINIT_HOTPLUG); /* create new memmap entry */ if (!strcmp(res->name, "System RAM")) -- cgit v1.2.3-71-gd317 From 395f6081bad49f9c54abafebab49ee23aa985bbd Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 22 Mar 2022 14:47:31 -0700 Subject: drivers/base/memory: determine and store zone for single-zone memory blocks test_pages_in_a_zone() is just another nasty PFN walker that can easily stumble over ZONE_DEVICE memory ranges falling into the same memory block as ordinary system RAM: the memmap of parts of these ranges might possibly be uninitialized. 
In fact, we observed (on an older kernel) with UBSAN: UBSAN: Undefined behaviour in ./include/linux/mm.h:1133:50 index 7 is out of range for type 'zone [5]' CPU: 121 PID: 35603 Comm: read_all Kdump: loaded Tainted: [...] Hardware name: Dell Inc. PowerEdge R7425/08V001, BIOS 1.12.2 11/15/2019 Call Trace: dump_stack+0x9a/0xf0 ubsan_epilogue+0x9/0x7a __ubsan_handle_out_of_bounds+0x13a/0x181 test_pages_in_a_zone+0x3c4/0x500 show_valid_zones+0x1fa/0x380 dev_attr_show+0x43/0xb0 sysfs_kf_seq_show+0x1c5/0x440 seq_read+0x49d/0x1190 vfs_read+0xff/0x300 ksys_read+0xb8/0x170 do_syscall_64+0xa5/0x4b0 entry_SYSCALL_64_after_hwframe+0x6a/0xdf RIP: 0033:0x7f01f4439b52 We seem to stumble over a memmap that contains a garbage zone id. While we could try inserting pfn_to_online_page() calls, it will just make memory offlining slower, because we use test_pages_in_a_zone() to make sure we're offlining pages that all belong to the same zone. Let's just get rid of this PFN walker and determine the single zone of a memory block -- if any -- for early memory blocks during boot. For memory onlining, we know the single zone already. Let's avoid any additional memmap scanning and just rely on the zone information available during boot. For memory hot(un)plug, we only really care about memory blocks that: * span a single zone (and, thereby, a single node) * are completely System RAM (IOW, no holes, no ZONE_DEVICE) If one of these conditions is not met, we reject memory offlining. Hotplugged memory blocks (starting out offline), always meet both conditions. There are three scenarios to handle: (1) Memory hot(un)plug A memory block with zone == NULL cannot be offlined, corresponding to our previous test_pages_in_a_zone() check. After successful memory onlining/offlining, we simply set the zone accordingly. * Memory onlining: set the zone we just used for onlining * Memory offlining: set zone = NULL So a hotplugged memory block starts with zone = NULL. Once memory onlining is done, we set the proper zone. (2) Boot memory with !CONFIG_NUMA We know that there is just a single pgdat, so we simply scan all zones of that pgdat for an intersection with our memory block PFN range when adding the memory block. If more than one zone intersects (e.g., DMA and DMA32 on x86 for the first memory block) we set zone = NULL and consequently mimic what test_pages_in_a_zone() used to do. (3) Boot memory with CONFIG_NUMA At the point in time we create the memory block devices during boot, we don't know yet which nodes *actually* span a memory block. While we could scan all zones of all nodes for intersections, overlapping nodes complicate the situation and scanning all nodes is possibly expensive. But that problem has already been solved by the code that sets the node of a memory block and creates the link in the sysfs -- do_register_memory_block_under_node(). So, we hook into the code that sets the node id for a memory block. If we already have a different node id set for the memory block, we know that multiple nodes *actually* have PFNs falling into our memory block: we set zone = NULL and consequently mimic what test_pages_in_a_zone() used to do. If there is no node id set, we do the same as (2) for the given node. 
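A condensed sketch of the resulting rule for early memory, as implemented by memory_block_add_nid() in the diff below:

	/* Sketch: setting the node id for an early memory block. */
	if (context == MEMINIT_EARLY && mem->nid != nid) {
		if (mem->nid == NUMA_NO_NODE)
			/* first node seen: detect the single zone, if any */
			mem->zone = early_node_zone_for_memory_block(mem, nid);
		else
			/* a second, different node: the block spans nodes */
			mem->zone = NULL;
	}
	mem->nid = nid;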
Note that the call order in driver_init() is: -> memory_dev_init(): create memory block devices -> node_dev_init(): link memory block devices to the node and set the node id So in summary, we detect if there is a single zone responsible for this memory block and we consequently store the zone in that case in the memory block, updating it during memory onlining/offlining. Link: https://lkml.kernel.org/r/20220210184359.235565-3-david@redhat.com Signed-off-by: David Hildenbrand Reported-by: Rafael Parra Reviewed-by: Oscar Salvador Cc: "Rafael J. Wysocki" Cc: Greg Kroah-Hartman Cc: Michal Hocko Cc: Rafael Parra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 101 +++++++++++++++++++++++++++++++++++++++-- drivers/base/node.c | 13 ++---- include/linux/memory.h | 12 +++++ include/linux/memory_hotplug.h | 6 +-- mm/memory_hotplug.c | 50 ++++---------------- 5 files changed, 125 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 6ee2181adc3f..f75e3467cb59 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -215,6 +215,7 @@ static int memory_block_online(struct memory_block *mem) adjust_present_page_count(pfn_to_page(start_pfn), mem->group, nr_vmemmap_pages); + mem->zone = zone; return ret; } @@ -225,6 +226,9 @@ static int memory_block_offline(struct memory_block *mem) unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages; int ret; + if (!mem->zone) + return -EINVAL; + /* * Unaccount before offlining, such that unpopulated zone and kthreads * can properly be torn down in offline_pages(). @@ -234,7 +238,7 @@ static int memory_block_offline(struct memory_block *mem) -nr_vmemmap_pages); ret = offline_pages(start_pfn + nr_vmemmap_pages, - nr_pages - nr_vmemmap_pages, mem->group); + nr_pages - nr_vmemmap_pages, mem->zone, mem->group); if (ret) { /* offline_pages() failed. Account back. */ if (nr_vmemmap_pages) @@ -246,6 +250,7 @@ static int memory_block_offline(struct memory_block *mem) if (nr_vmemmap_pages) mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); + mem->zone = NULL; return ret; } @@ -411,11 +416,10 @@ static ssize_t valid_zones_show(struct device *dev, */ if (mem->state == MEM_ONLINE) { /* - * The block contains more than one zone can not be offlined. - * This can happen e.g. for ZONE_DMA and ZONE_DMA32 + * If !mem->zone, the memory block spans multiple zones and + * cannot get offlined. */ - default_zone = test_pages_in_a_zone(start_pfn, - start_pfn + nr_pages); + default_zone = mem->zone; if (!default_zone) return sysfs_emit(buf, "%s\n", "none"); len += sysfs_emit_at(buf, len, "%s", default_zone->name); @@ -643,6 +647,82 @@ int register_memory(struct memory_block *memory) return ret; } +static struct zone *early_node_zone_for_memory_block(struct memory_block *mem, + int nid) +{ + const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); + const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; + struct zone *zone, *matching_zone = NULL; + pg_data_t *pgdat = NODE_DATA(nid); + int i; + + /* + * This logic only works for early memory, when the applicable zones + * already span the memory block. We don't expect overlapping zones on + * a single node for early memory. So if we're told that some PFNs + * of a node fall into this memory block, we can assume that all node + * zones that intersect with the memory block are actually applicable. + * No need to look at the memmap. 
+ */ + for (i = 0; i < MAX_NR_ZONES; i++) { + zone = pgdat->node_zones + i; + if (!populated_zone(zone)) + continue; + if (!zone_intersects(zone, start_pfn, nr_pages)) + continue; + if (!matching_zone) { + matching_zone = zone; + continue; + } + /* Spans multiple zones ... */ + matching_zone = NULL; + break; + } + return matching_zone; +} + +#ifdef CONFIG_NUMA +/** + * memory_block_add_nid() - Indicate that system RAM falling into this memory + * block device (partially) belongs to the given node. + * @mem: The memory block device. + * @nid: The node id. + * @context: The memory initialization context. + * + * Indicate that system RAM falling into this memory block (partially) belongs + * to the given node. If the context indicates ("early") that we are adding the + * node during node device subsystem initialization, this will also properly + * set/adjust mem->zone based on the zone ranges of the given node. + */ +void memory_block_add_nid(struct memory_block *mem, int nid, + enum meminit_context context) +{ + if (context == MEMINIT_EARLY && mem->nid != nid) { + /* + * For early memory we have to determine the zone when setting + * the node id and handle multiple nodes spanning a single + * memory block by indicating via zone == NULL that we're not + * dealing with a single zone. So if we're setting the node id + * the first time, determine if there is a single zone. If we're + * setting the node id a second time to a different node, + * invalidate the single detected zone. + */ + if (mem->nid == NUMA_NO_NODE) + mem->zone = early_node_zone_for_memory_block(mem, nid); + else + mem->zone = NULL; + } + + /* + * If this memory block spans multiple nodes, we only indicate + * the last processed node. If we span multiple nodes (not applicable + * to hotplugged memory), zone == NULL will prohibit memory offlining + * and consequently unplug. + */ + mem->nid = nid; +} +#endif + static int init_memory_block(unsigned long block_id, unsigned long state, unsigned long nr_vmemmap_pages, struct memory_group *group) @@ -665,6 +745,17 @@ static int init_memory_block(unsigned long block_id, unsigned long state, mem->nr_vmemmap_pages = nr_vmemmap_pages; INIT_LIST_HEAD(&mem->group_next); +#ifndef CONFIG_NUMA + if (state == MEM_ONLINE) + /* + * MEM_ONLINE at this point implies early memory. With NUMA, + * we'll determine the zone when setting the node id via + * memory_block_add_nid(). Memory hotplug updates the zone + * manually when memory onlining/offlining succeeds. + */ + mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE); +#endif /* CONFIG_NUMA */ + ret = register_memory(mem); if (ret) return ret; diff --git a/drivers/base/node.c b/drivers/base/node.c index 5d75341413ce..ec8bb24a5a22 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -796,15 +796,12 @@ static int __ref get_nid_for_pfn(unsigned long pfn) } static void do_register_memory_block_under_node(int nid, - struct memory_block *mem_blk) + struct memory_block *mem_blk, + enum meminit_context context) { int ret; - /* - * If this memory block spans multiple nodes, we only indicate - * the last processed node.
- */ - mem_blk->nid = nid; + memory_block_add_nid(mem_blk, nid, context); ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, &mem_blk->dev.kobj, @@ -857,7 +854,7 @@ static int register_mem_block_under_node_early(struct memory_block *mem_blk, if (page_nid != nid) continue; - do_register_memory_block_under_node(nid, mem_blk); + do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY); return 0; } /* mem section does not span the specified node */ @@ -873,7 +870,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk, { int nid = *(int *)arg; - do_register_memory_block_under_node(nid, mem_blk); + do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG); return 0; } diff --git a/include/linux/memory.h b/include/linux/memory.h index 88eb587b5143..aa619464a1df 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -70,6 +70,13 @@ struct memory_block { unsigned long state; /* serialized by the dev->lock */ int online_type; /* for passing data to online routine */ int nid; /* NID for this memory block */ + /* + * The single zone of this memory block if all PFNs of this memory block + * that are System RAM (not a memory hole, not ZONE_DEVICE ranges) are + * managed by a single zone. NULL if multiple zones (including nodes) + * apply. + */ + struct zone *zone; struct device dev; /* * Number of vmemmap pages. These pages @@ -161,6 +168,11 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, }) #define register_hotmemory_notifier(nb) register_memory_notifier(nb) #define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb) + +#ifdef CONFIG_NUMA +void memory_block_add_nid(struct memory_block *mem, int nid, + enum meminit_context context); +#endif /* CONFIG_NUMA */ #endif /* CONFIG_MEMORY_HOTPLUG */ /* diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 76bf2de86def..1ce6f8044f1e 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -163,8 +163,6 @@ extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone, struct memory_group *group); -extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, - unsigned long end_pfn); extern void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn); @@ -293,7 +291,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {} extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages, - struct memory_group *group); + struct zone *zone, struct memory_group *group); extern int remove_memory(u64 start, u64 size); extern void __remove_memory(u64 start, u64 size); extern int offline_and_remove_memory(u64 start, u64 size); @@ -302,7 +300,7 @@ extern int offline_and_remove_memory(u64 start, u64 size); static inline void try_offline_node(int nid) {} static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages, - struct memory_group *group) + struct zone *zone, struct memory_group *group) { return -EINVAL; } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index ed1a5dac6797..aee69281dad6 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1548,38 +1548,6 @@ bool mhp_range_allowed(u64 start, u64 size, bool need_mapping) } #ifdef CONFIG_MEMORY_HOTREMOVE -/* - * Confirm all pages in a range [start, end) belong 
to the same zone (skipping - * memory holes). When true, return the zone. - */ -struct zone *test_pages_in_a_zone(unsigned long start_pfn, - unsigned long end_pfn) -{ - unsigned long pfn, sec_end_pfn; - struct zone *zone = NULL; - struct page *page; - - for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); - pfn < end_pfn; - pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { - /* Make sure the memory section is present first */ - if (!present_section_nr(pfn_to_section_nr(pfn))) - continue; - for (; pfn < sec_end_pfn && pfn < end_pfn; - pfn += MAX_ORDER_NR_PAGES) { - /* Check if we got outside of the zone */ - if (zone && !zone_spans_pfn(zone, pfn)) - return NULL; - page = pfn_to_page(pfn); - if (zone && page_zone(page) != zone) - return NULL; - zone = page_zone(page); - } - } - - return zone; -} - /* * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, * non-lru movable pages and hugepages). Will skip over most unmovable @@ -1803,15 +1771,15 @@ static int count_system_ram_pages_cb(unsigned long start_pfn, } int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, - struct memory_group *group) + struct zone *zone, struct memory_group *group) { const unsigned long end_pfn = start_pfn + nr_pages; unsigned long pfn, system_ram_pages = 0; + const int node = zone_to_nid(zone); unsigned long flags; - struct zone *zone; struct memory_notify arg; - int ret, node; char *reason; + int ret; /* * {on,off}lining is constrained to full memory sections (or more @@ -1843,15 +1811,17 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, goto failed_removal; } - /* This makes hotplug much easier...and readable. - we assume this for now. .*/ - zone = test_pages_in_a_zone(start_pfn, end_pfn); - if (!zone) { + /* + * We only support offlining of memory blocks managed by a single zone, + * checked by calling code. This is just a sanity check that we might + * want to remove in the future. + */ + if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone || + page_zone(pfn_to_page(end_pfn - 1)) != zone)) { ret = -EINVAL; reason = "multizone range"; goto failed_removal; } - node = zone_to_nid(zone); /* * Disable pcplists so that page isolation cannot race with freeing -- cgit v1.2.3-71-gd317 From 734c15700cdf9062ae98d8b131c6fe873dfad26d Mon Sep 17 00:00:00 2001 From: Oscar Salvador Date: Tue, 22 Mar 2022 14:47:37 -0700 Subject: mm: only re-generate demotion targets when a numa node changes its N_CPU state Abhishek reported that after patch [1], hotplug operations are taking roughly double the expected time. [2] The reason behind this is that the CPU callbacks that migrate_on_reclaim_init() sets always call set_migration_target_nodes() whenever a CPU is brought up/down. But we only care about NUMA nodes going from having CPUs to becoming cpuless, and vice versa, as that influences the demotion_target order. We already have two CPU callbacks (vmstat_cpu_online() and vmstat_cpu_dead()) that check exactly that, so get rid of the CPU callbacks in migrate_on_reclaim_init() and only call set_migration_target_nodes() from vmstat_cpu_{dead,online}() whenever a NUMA node changes its N_CPU state.
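The fix reduces to rebuilding the demotion order only when a node's N_CPU state actually flips. A minimal sketch of that guard, under hypothetical names (nr_cpus_on_node and rebuild_demotion_order stand in for the kernel's node-state tracking and set_migration_target_nodes(); the real callbacks check node_state() instead of a counter):

#define NR_NODES 64	/* illustrative bound */

static int nr_cpus_on_node[NR_NODES];

static void rebuild_demotion_order(void)
{
	/* stands in for set_migration_target_nodes() */
}

static void cpu_came_online(int node)
{
	/* Only the 0 -> 1 transition changes the node's N_CPU state. */
	if (nr_cpus_on_node[node]++ == 0)
		rebuild_demotion_order();
}

static void cpu_went_offline(int node)
{
	/* Only the 1 -> 0 transition changes the node's N_CPU state. */
	if (--nr_cpus_on_node[node] == 0)
		rebuild_demotion_order();
}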
[1] https://lore.kernel.org/linux-mm/20210721063926.3024591-2-ying.huang@intel.com/ [2] https://lore.kernel.org/linux-mm/eb438ddd-2919-73d4-bd9f-b7eecdd9577a@linux.vnet.ibm.com/ [osalvador@suse.de: add feedback from Huang Ying] Link: https://lkml.kernel.org/r/20220314150945.12694-1-osalvador@suse.de Link: https://lkml.kernel.org/r/20220310120749.23077-1-osalvador@suse.de Fixes: 884a6e5d1f93b ("mm/migrate: update node demotion order on hotplug events") Signed-off-by: Oscar Salvador Reviewed-by: Baolin Wang Tested-by: Baolin Wang Reported-by: Abhishek Goel Cc: Dave Hansen Cc: "Huang, Ying" Cc: Abhishek Goel Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 8 ++++++++ mm/migrate.c | 47 ++++++++++------------------------------------- mm/vmstat.c | 13 ++++++++++++- 3 files changed, 30 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index db96e10eb8da..90e75d5a54d6 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -48,7 +48,15 @@ int folio_migrate_mapping(struct address_space *mapping, struct folio *newfolio, struct folio *folio, int extra_count); extern bool numa_demotion_enabled; +extern void migrate_on_reclaim_init(void); +#ifdef CONFIG_HOTPLUG_CPU +extern void set_migration_target_nodes(void); #else +static inline void set_migration_target_nodes(void) {} +#endif +#else + +static inline void set_migration_target_nodes(void) {} static inline void putback_movable_pages(struct list_head *l) {} static inline int migrate_pages(struct list_head *l, new_page_t new, diff --git a/mm/migrate.c b/mm/migrate.c index 78b2cf87946d..bc9da3fd01aa 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -3209,7 +3209,7 @@ again: /* * For callers that do not hold get_online_mems() already. */ -static void set_migration_target_nodes(void) +void set_migration_target_nodes(void) { get_online_mems(); __set_migration_target_nodes(); @@ -3273,51 +3273,24 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self, return notifier_from_errno(0); } -/* - * React to hotplug events that might affect the migration targets - * like events that online or offline NUMA nodes. - * - * The ordering is also currently dependent on which nodes have - * CPUs. That means we need CPU on/offline notification too. - */ -static int migration_online_cpu(unsigned int cpu) -{ - set_migration_target_nodes(); - return 0; -} - -static int migration_offline_cpu(unsigned int cpu) +void __init migrate_on_reclaim_init(void) { - set_migration_target_nodes(); - return 0; -} - -static int __init migrate_on_reclaim_init(void) -{ - int ret; - node_demotion = kmalloc_array(nr_node_ids, sizeof(struct demotion_nodes), GFP_KERNEL); WARN_ON(!node_demotion); - ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline", - NULL, migration_offline_cpu); + hotplug_memory_notifier(migrate_on_reclaim_callback, 100); /* - * In the unlikely case that this fails, the automatic - * migration targets may become suboptimal for nodes - * where N_CPU changes. With such a small impact in a - * rare case, do not bother trying to do anything special. + * At this point, all NUMA nodes with memory/CPUs have their state + * properly set, so we can build the demotion order now. + * Let us hold the cpu_hotplug lock, as we could possibly have + * CPU hotplug events during boot.
*/ - WARN_ON(ret < 0); - ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online", - migration_online_cpu, NULL); - WARN_ON(ret < 0); - - hotplug_memory_notifier(migrate_on_reclaim_callback, 100); - return 0; + cpus_read_lock(); + set_migration_target_nodes(); + cpus_read_unlock(); } -late_initcall(migrate_on_reclaim_init); #endif /* CONFIG_HOTPLUG_CPU */ bool numa_demotion_enabled = false; diff --git a/mm/vmstat.c b/mm/vmstat.c index d5cc8d739fac..b75b1a64b54c 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -28,6 +28,7 @@ #include #include #include +#include <linux/migrate.h> #include "internal.h" @@ -2049,7 +2050,12 @@ static void __init init_cpu_node_state(void) static int vmstat_cpu_online(unsigned int cpu) { refresh_zone_stat_thresholds(); - node_set_state(cpu_to_node(cpu), N_CPU); + + if (!node_state(cpu_to_node(cpu), N_CPU)) { + node_set_state(cpu_to_node(cpu), N_CPU); + set_migration_target_nodes(); + } + return 0; } @@ -2072,6 +2078,8 @@ static int vmstat_cpu_dead(unsigned int cpu) return 0; node_clear_state(node, N_CPU); + set_migration_target_nodes(); + return 0; } @@ -2103,6 +2111,9 @@ void __init init_mm_internals(void) start_shepherd_timer(); #endif +#if defined(CONFIG_MIGRATION) && defined(CONFIG_HOTPLUG_CPU) + migrate_on_reclaim_init(); +#endif #ifdef CONFIG_PROC_FS proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op); proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op); -- cgit v1.2.3-71-gd317 From 6eada26ffc80bfe1f2db088be0c44ec82b5cd3dc Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 22 Mar 2022 14:47:46 -0700 Subject: mm: remove usercopy_warn() Users of usercopy_warn() were removed by commit 53944f171a89 ("mm: remove HARDENED_USERCOPY_FALLBACK"). Remove it. Link: https://lkml.kernel.org/r/5f26643fc70b05f8455b60b99c30c17d635fa640.1644231910.git.christophe.leroy@csgroup.eu Signed-off-by: Christophe Leroy Reviewed-by: Miaohe Lin Reviewed-by: Stephen Kitt Reviewed-by: Muchun Song Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/uaccess.h | 2 -- mm/usercopy.c | 11 ----------- 2 files changed, 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index ac0394087f7d..bca27b4e5eb2 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -401,8 +401,6 @@ static inline void user_access_restore(unsigned long flags) { } #endif #ifdef CONFIG_HARDENED_USERCOPY -void usercopy_warn(const char *name, const char *detail, bool to_user, - unsigned long offset, unsigned long len); void __noreturn usercopy_abort(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len); diff --git a/mm/usercopy.c b/mm/usercopy.c index d0d268135d96..e7b0cb49daa1 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -70,17 +70,6 @@ static noinline int check_stack_object(const void *obj, unsigned long len) * kmem_cache_create_usercopy() function to create the cache (and * carefully audit the whitelist range). */ -void usercopy_warn(const char *name, const char *detail, bool to_user, - unsigned long offset, unsigned long len) -{ - WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n", - to_user ? "exposure" : "overwrite", - to_user ? "from" : "to", - name ? : "unknown?!", - detail ? " '" : "", detail ? : "", detail ?
"'" : "", - offset, len); -} - void __noreturn usercopy_abort(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len) -- cgit v1.2.3-71-gd317 From ad7489d5262d2aa775b5e5a1782793925fa90065 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 22 Mar 2022 14:47:49 -0700 Subject: mm: uninline copy_overflow() While building a small config with CONFIG_CC_OPTIMISE_FOR_SIZE, I ended up with more than 50 times the following function in vmlinux because GCC doesn't honor the 'inline' keyword: c00243bc : c00243bc: 94 21 ff f0 stwu r1,-16(r1) c00243c0: 7c 85 23 78 mr r5,r4 c00243c4: 7c 64 1b 78 mr r4,r3 c00243c8: 3c 60 c0 62 lis r3,-16286 c00243cc: 7c 08 02 a6 mflr r0 c00243d0: 38 63 5e e5 addi r3,r3,24293 c00243d4: 90 01 00 14 stw r0,20(r1) c00243d8: 4b ff 82 45 bl c001c61c <__warn_printk> c00243dc: 0f e0 00 00 twui r0,0 c00243e0: 80 01 00 14 lwz r0,20(r1) c00243e4: 38 21 00 10 addi r1,r1,16 c00243e8: 7c 08 03 a6 mtlr r0 c00243ec: 4e 80 00 20 blr With -Winline, GCC tells: /include/linux/thread_info.h:212:20: warning: inlining failed in call to 'copy_overflow': call is unlikely and code size would grow [-Winline] copy_overflow() is a non conditional warning called by check_copy_size() on an error path. check_copy_size() have to remain inlined in order to benefit from constant folding, but copy_overflow() is not worth inlining. Uninline the warning when CONFIG_BUG is selected. When CONFIG_BUG is not selected, WARN() does nothing so skip it. This reduces the size of vmlinux by almost 4kbytes. Link: https://lkml.kernel.org/r/e1723b9cfa924bcefcd41f69d0025b38e4c9364e.1644819985.git.christophe.leroy@csgroup.eu Signed-off-by: Christophe Leroy Cc: David Laight Cc: Anshuman Khandual Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/thread_info.h | 5 ++++- mm/maccess.c | 6 ++++++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 73a6f34b3847..9f392ec76f2b 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -209,9 +209,12 @@ __bad_copy_from(void); extern void __compiletime_error("copy destination size is too small") __bad_copy_to(void); +void __copy_overflow(int size, unsigned long count); + static inline void copy_overflow(int size, unsigned long count) { - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); + if (IS_ENABLED(CONFIG_BUG)) + __copy_overflow(size, count); } static __always_inline __must_check bool diff --git a/mm/maccess.c b/mm/maccess.c index d3f1a1f0b1c1..3fed2b876539 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -335,3 +335,9 @@ long strnlen_user_nofault(const void __user *unsafe_addr, long count) return ret; } + +void __copy_overflow(int size, unsigned long count) +{ + WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); +} +EXPORT_SYMBOL(__copy_overflow); -- cgit v1.2.3-71-gd317 From d7ca25c53e25a9a628aaa19b5a031f115d8c353d Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Tue, 22 Mar 2022 14:47:58 -0700 Subject: highmem: document kunmap_local() Some users of kmap() add an offset to the kmap() address to be used during the mapping. When converting to kmap_local_page() the base address does not need to be stored because any address within the page can be used in kunmap_local(). However, this was not clear from the documentation and cause some questions.[1] Document that any address in the page can be used in kunmap_local() to clarify this for future users. 
[1] https://lore.kernel.org/lkml/20211213154543.GM3538886@iweiny-DESK2.sc.intel.com/ [ira.weiny@intel.com: updates per Christoph] Link: https://lkml.kernel.org/r/20220124182138.816693-1-ira.weiny@intel.com Link: https://lkml.kernel.org/r/20220124013045.806718-1-ira.weiny@intel.com Signed-off-by: Ira Weiny Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/highmem-internal.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index 0a0b2b09b1b8..a77be5630209 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -246,6 +246,16 @@ do { \ __kunmap_atomic(__addr); \ } while (0) +/** + * kunmap_local - Unmap a page mapped via kmap_local_page(). + * @__addr: An address within the page mapped + * + * @__addr can be any address within the mapped page. Commonly it is the + * address returned from kmap_local_page(), but it can also include offsets. + * + * Unmapping should be done in the reverse order of the mapping. See + * kmap_local_page() for details. + */ #define kunmap_local(__addr) \ do { \ BUILD_BUG_ON(__same_type((__addr), struct page *)); \ -- cgit v1.2.3-71-gd317 From 436428255d5981e49ff015fc8e398ecf2ba10c24 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 22 Mar 2022 14:48:37 -0700 Subject: mm/damon/core: move damon_set_targets() into dbgfs The damon_set_targets() function is defined in the core for general use cases, but called only from dbgfs. Also, because the function is for general use cases, dbgfs does additional handling of the pid type target id case. To make the situation simpler, this commit moves the function into dbgfs and makes it do the pid type case handling on its own. Link: https://lkml.kernel.org/r/20211230100723.2238-4-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/damon.h | 2 -- mm/damon/core-test.h | 5 ++++- mm/damon/core.c | 32 ------------------------------- mm/damon/dbgfs-test.h | 14 +++++++------- mm/damon/dbgfs.c | 53 +++++++++++++++++++++++++++++++++++++++------------ 5 files changed, 52 insertions(+), 54 deletions(-) (limited to 'include/linux') diff --git a/include/linux/damon.h b/include/linux/damon.h index 5e1e3a128b77..bd021af5db3d 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -484,8 +484,6 @@ unsigned int damon_nr_regions(struct damon_target *t); struct damon_ctx *damon_new_ctx(void); void damon_destroy_ctx(struct damon_ctx *ctx); -int damon_set_targets(struct damon_ctx *ctx, - unsigned long *ids, ssize_t nr_ids); int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, unsigned long aggr_int, unsigned long primitive_upd_int, unsigned long min_nr_reg, unsigned long max_nr_reg); diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h index 7008c3735e99..4a6141ddd6fc 100644 --- a/mm/damon/core-test.h +++ b/mm/damon/core-test.h @@ -86,7 +86,10 @@ static void damon_test_aggregate(struct kunit *test) struct damon_region *r; int it, ir; - damon_set_targets(ctx, target_ids, 3); + for (it = 0; it < 3; it++) { + t = damon_new_target(target_ids[it]); + damon_add_target(ctx, t); + } it = 0; damon_for_each_target(t, ctx) { diff --git a/mm/damon/core.c b/mm/damon/core.c index 1dd153c31c9e..3fef5c667a31 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -245,38 +245,6 @@ void damon_destroy_ctx(struct damon_ctx *ctx) kfree(ctx); } -/** - * damon_set_targets() - Set monitoring targets.
- * @ctx: monitoring context - * @ids: array of target ids - * @nr_ids: number of entries in @ids - * - * This function should not be called while the kdamond is running. - * - * Return: 0 on success, negative error code otherwise. - */ -int damon_set_targets(struct damon_ctx *ctx, - unsigned long *ids, ssize_t nr_ids) -{ - ssize_t i; - struct damon_target *t, *next; - - damon_destroy_targets(ctx); - - for (i = 0; i < nr_ids; i++) { - t = damon_new_target(ids[i]); - if (!t) { - /* The caller should do cleanup of the ids itself */ - damon_for_each_target_safe(t, next, ctx) - damon_destroy_target(t); - return -ENOMEM; - } - damon_add_target(ctx, t); - } - - return 0; -} - /** * damon_set_attrs() - Set attributes for the monitoring. * @ctx: monitoring context diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h index 00bff058fe08..c1c988b607bc 100644 --- a/mm/damon/dbgfs-test.h +++ b/mm/damon/dbgfs-test.h @@ -86,23 +86,23 @@ static void damon_dbgfs_test_set_targets(struct kunit *test) ctx->primitive.target_valid = NULL; ctx->primitive.cleanup = NULL; - damon_set_targets(ctx, ids, 3); + dbgfs_set_targets(ctx, ids, 3); sprint_target_ids(ctx, buf, 64); KUNIT_EXPECT_STREQ(test, (char *)buf, "1 2 3\n"); - damon_set_targets(ctx, NULL, 0); + dbgfs_set_targets(ctx, NULL, 0); sprint_target_ids(ctx, buf, 64); KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); - damon_set_targets(ctx, (unsigned long []){1, 2}, 2); + dbgfs_set_targets(ctx, (unsigned long []){1, 2}, 2); sprint_target_ids(ctx, buf, 64); KUNIT_EXPECT_STREQ(test, (char *)buf, "1 2\n"); - damon_set_targets(ctx, (unsigned long []){2}, 1); + dbgfs_set_targets(ctx, (unsigned long []){2}, 1); sprint_target_ids(ctx, buf, 64); KUNIT_EXPECT_STREQ(test, (char *)buf, "2\n"); - damon_set_targets(ctx, NULL, 0); + dbgfs_set_targets(ctx, NULL, 0); sprint_target_ids(ctx, buf, 64); KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); @@ -130,7 +130,7 @@ static void damon_dbgfs_test_set_init_regions(struct kunit *test) int i, rc; char buf[256]; - damon_set_targets(ctx, ids, 3); + dbgfs_set_targets(ctx, ids, 3); /* Put valid inputs and check the results */ for (i = 0; i < ARRAY_SIZE(valid_inputs); i++) { @@ -158,7 +158,7 @@ static void damon_dbgfs_test_set_init_regions(struct kunit *test) KUNIT_EXPECT_STREQ(test, (char *)buf, ""); } - damon_set_targets(ctx, NULL, 0); + dbgfs_set_targets(ctx, NULL, 0); damon_destroy_ctx(ctx); } diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index 3f65af04e4e6..58867b966635 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -358,11 +358,48 @@ static void dbgfs_put_pids(unsigned long *ids, int nr_ids) put_pid((struct pid *)ids[i]); } +/* + * dbgfs_set_targets() - Set monitoring targets. + * @ctx: monitoring context + * @ids: array of target ids + * @nr_ids: number of entries in @ids + * + * This function should not be called while the kdamond is running. + * + * Return: 0 on success, negative error code otherwise. 
+ */ +static int dbgfs_set_targets(struct damon_ctx *ctx, + unsigned long *ids, ssize_t nr_ids) +{ + ssize_t i; + struct damon_target *t, *next; + + damon_for_each_target_safe(t, next, ctx) { + if (targetid_is_pid(ctx)) + put_pid((struct pid *)t->id); + damon_destroy_target(t); + } + + for (i = 0; i < nr_ids; i++) { + t = damon_new_target(ids[i]); + if (!t) { + /* The caller should do cleanup of the ids itself */ + damon_for_each_target_safe(t, next, ctx) + damon_destroy_target(t); + if (targetid_is_pid(ctx)) + dbgfs_put_pids(ids, nr_ids); + return -ENOMEM; + } + damon_add_target(ctx, t); + } + + return 0; +} + static ssize_t dbgfs_target_ids_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct damon_ctx *ctx = file->private_data; - struct damon_target *t, *next_t; bool id_is_pid = true; char *kbuf; unsigned long *targets; @@ -407,11 +444,7 @@ static ssize_t dbgfs_target_ids_write(struct file *file, } /* remove previously set targets */ - damon_for_each_target_safe(t, next_t, ctx) { - if (targetid_is_pid(ctx)) - put_pid((struct pid *)t->id); - damon_destroy_target(t); - } + dbgfs_set_targets(ctx, NULL, 0); /* Configure the context for the address space type */ if (id_is_pid) @@ -419,13 +452,9 @@ static ssize_t dbgfs_target_ids_write(struct file *file, else damon_pa_set_primitives(ctx); - ret = damon_set_targets(ctx, targets, nr_targets); - if (ret) { - if (id_is_pid) - dbgfs_put_pids(targets, nr_targets); - } else { + ret = dbgfs_set_targets(ctx, targets, nr_targets); + if (!ret) ret = count; - } unlock_out: mutex_unlock(&ctx->kdamond_lock); -- cgit v1.2.3-71-gd317 From 1971bd630452e943380429336a851c55b027eed1 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 22 Mar 2022 14:48:40 -0700 Subject: mm/damon: remove the target id concept DAMON asks each monitoring target ('struct damon_target') to have one 'unsigned long' integer called 'id', which should be unique among the targets of the same monitoring context. Its meaning is, however, totally up to the monitoring primitives registered to the monitoring context. For example, the virtual address spaces monitoring primitives treat the id as a 'struct pid' pointer. This makes the code flexible, but ugly, not well-documented, and type-unsafe[1]. Also, identification of each target can be done via its index. For that reason, this commit removes the concept and uses clear type definition. For now, only 'struct pid' pointer is used for the virtual address spaces monitoring. If DAMON is extended in the future so that we need to put another identifier field in the struct, we will use a union for such primitives-dependent fields and document which primitives are using which type.
[1] https://lore.kernel.org/linux-mm/20211013154535.4aaeaaf9d0182922e405dd1e@linux-foundation.org/ Link: https://lkml.kernel.org/r/20211230100723.2238-5-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/damon.h | 11 ++-- mm/damon/core-test.h | 18 +++--- mm/damon/core.c | 4 +- mm/damon/dbgfs-test.h | 63 ++++++++------------- mm/damon/dbgfs.c | 152 ++++++++++++++++++++++++++++++-------------------- mm/damon/reclaim.c | 3 +- mm/damon/vaddr-test.h | 6 +- mm/damon/vaddr.c | 4 +- 8 files changed, 133 insertions(+), 128 deletions(-) (limited to 'include/linux') diff --git a/include/linux/damon.h b/include/linux/damon.h index bd021af5db3d..7c1d915b3587 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -60,19 +60,18 @@ struct damon_region { /** * struct damon_target - Represents a monitoring target. - * @id: Unique identifier for this target. + * @pid: The PID of the virtual address space to monitor. * @nr_regions: Number of monitoring target regions of this target. * @regions_list: Head of the monitoring target regions of this target. * @list: List head for siblings. * * Each monitoring context could have multiple targets. For example, a context * for virtual memory address spaces could have multiple target processes. The - * @id of each target should be unique among the targets of the context. For - * example, in the virtual address monitoring context, it could be a pidfd or - * an address of an mm_struct. + * @pid should be set for appropriate address space monitoring primitives + * including the virtual address spaces monitoring primitives. */ struct damon_target { - unsigned long id; + struct pid *pid; unsigned int nr_regions; struct list_head regions_list; struct list_head list; @@ -475,7 +474,7 @@ struct damos *damon_new_scheme( void damon_add_scheme(struct damon_ctx *ctx, struct damos *s); void damon_destroy_scheme(struct damos *s); -struct damon_target *damon_new_target(unsigned long id); +struct damon_target *damon_new_target(void); void damon_add_target(struct damon_ctx *ctx, struct damon_target *t); bool damon_targets_empty(struct damon_ctx *ctx); void damon_free_target(struct damon_target *t); diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h index 4a6141ddd6fc..b4085deb9fa0 100644 --- a/mm/damon/core-test.h +++ b/mm/damon/core-test.h @@ -24,7 +24,7 @@ static void damon_test_regions(struct kunit *test) KUNIT_EXPECT_EQ(test, 2ul, r->ar.end); KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses); - t = damon_new_target(42); + t = damon_new_target(); KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t)); damon_add_region(r, t); @@ -52,8 +52,7 @@ static void damon_test_target(struct kunit *test) struct damon_ctx *c = damon_new_ctx(); struct damon_target *t; - t = damon_new_target(42); - KUNIT_EXPECT_EQ(test, 42ul, t->id); + t = damon_new_target(); KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c)); damon_add_target(c, t); @@ -78,7 +77,6 @@ static void damon_test_target(struct kunit *test) static void damon_test_aggregate(struct kunit *test) { struct damon_ctx *ctx = damon_new_ctx(); - unsigned long target_ids[] = {1, 2, 3}; unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} }; unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} }; unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} }; @@ -87,7 +85,7 @@ static void damon_test_aggregate(struct kunit *test) int it, ir; for (it = 0; it < 3; it++) { - t = damon_new_target(target_ids[it]); + t = damon_new_target(); 
damon_add_target(ctx, t); } @@ -125,7 +123,7 @@ static void damon_test_split_at(struct kunit *test) struct damon_target *t; struct damon_region *r; - t = damon_new_target(42); + t = damon_new_target(); r = damon_new_region(0, 100); damon_add_region(r, t); damon_split_region_at(c, t, r, 25); @@ -146,7 +144,7 @@ static void damon_test_merge_two(struct kunit *test) struct damon_region *r, *r2, *r3; int i; - t = damon_new_target(42); + t = damon_new_target(); r = damon_new_region(0, 100); r->nr_accesses = 10; damon_add_region(r, t); @@ -194,7 +192,7 @@ static void damon_test_merge_regions_of(struct kunit *test) unsigned long eaddrs[] = {112, 130, 156, 170, 230}; int i; - t = damon_new_target(42); + t = damon_new_target(); for (i = 0; i < ARRAY_SIZE(sa); i++) { r = damon_new_region(sa[i], ea[i]); r->nr_accesses = nrs[i]; @@ -218,14 +216,14 @@ static void damon_test_split_regions_of(struct kunit *test) struct damon_target *t; struct damon_region *r; - t = damon_new_target(42); + t = damon_new_target(); r = damon_new_region(0, 22); damon_add_region(r, t); damon_split_regions_of(c, t, 2); KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u); damon_free_target(t); - t = damon_new_target(42); + t = damon_new_target(); r = damon_new_region(0, 220); damon_add_region(r, t); damon_split_regions_of(c, t, 4); diff --git a/mm/damon/core.c b/mm/damon/core.c index 3fef5c667a31..bf495236d741 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -144,7 +144,7 @@ void damon_destroy_scheme(struct damos *s) * * Returns the pointer to the new struct if success, or NULL otherwise */ -struct damon_target *damon_new_target(unsigned long id) +struct damon_target *damon_new_target(void) { struct damon_target *t; @@ -152,7 +152,7 @@ struct damon_target *damon_new_target(unsigned long id) if (!t) return NULL; - t->id = id; + t->pid = NULL; t->nr_regions = 0; INIT_LIST_HEAD(&t->regions_list); diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h index c1c988b607bc..0d3a14c00acf 100644 --- a/mm/damon/dbgfs-test.h +++ b/mm/damon/dbgfs-test.h @@ -12,66 +12,58 @@ #include -static void damon_dbgfs_test_str_to_target_ids(struct kunit *test) +static void damon_dbgfs_test_str_to_ints(struct kunit *test) { char *question; - unsigned long *answers; - unsigned long expected[] = {12, 35, 46}; + int *answers; + int expected[] = {12, 35, 46}; ssize_t nr_integers = 0, i; question = "123"; - answers = str_to_target_ids(question, strlen(question), - &nr_integers); + answers = str_to_ints(question, strlen(question), &nr_integers); KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); - KUNIT_EXPECT_EQ(test, 123ul, answers[0]); + KUNIT_EXPECT_EQ(test, 123, answers[0]); kfree(answers); question = "123abc"; - answers = str_to_target_ids(question, strlen(question), - &nr_integers); + answers = str_to_ints(question, strlen(question), &nr_integers); KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); - KUNIT_EXPECT_EQ(test, 123ul, answers[0]); + KUNIT_EXPECT_EQ(test, 123, answers[0]); kfree(answers); question = "a123"; - answers = str_to_target_ids(question, strlen(question), - &nr_integers); + answers = str_to_ints(question, strlen(question), &nr_integers); KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); kfree(answers); question = "12 35"; - answers = str_to_target_ids(question, strlen(question), - &nr_integers); + answers = str_to_ints(question, strlen(question), &nr_integers); KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers); for (i = 0; i < nr_integers; i++) KUNIT_EXPECT_EQ(test, expected[i], answers[i]); kfree(answers); question = "12 35 46"; - answers 
= str_to_target_ids(question, strlen(question), - &nr_integers); + answers = str_to_ints(question, strlen(question), &nr_integers); KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers); for (i = 0; i < nr_integers; i++) KUNIT_EXPECT_EQ(test, expected[i], answers[i]); kfree(answers); question = "12 35 abc 46"; - answers = str_to_target_ids(question, strlen(question), - &nr_integers); + answers = str_to_ints(question, strlen(question), &nr_integers); KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers); for (i = 0; i < 2; i++) KUNIT_EXPECT_EQ(test, expected[i], answers[i]); kfree(answers); question = ""; - answers = str_to_target_ids(question, strlen(question), - &nr_integers); + answers = str_to_ints(question, strlen(question), &nr_integers); KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); kfree(answers); question = "\n"; - answers = str_to_target_ids(question, strlen(question), - &nr_integers); + answers = str_to_ints(question, strlen(question), &nr_integers); KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); kfree(answers); } @@ -79,30 +71,20 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test) static void damon_dbgfs_test_set_targets(struct kunit *test) { struct damon_ctx *ctx = dbgfs_new_ctx(); - unsigned long ids[] = {1, 2, 3}; char buf[64]; - /* Make DAMON consider target id as plain number */ - ctx->primitive.target_valid = NULL; - ctx->primitive.cleanup = NULL; + /* Make DAMON consider target has no pid */ + ctx->primitive = (struct damon_primitive){}; - dbgfs_set_targets(ctx, ids, 3); - sprint_target_ids(ctx, buf, 64); - KUNIT_EXPECT_STREQ(test, (char *)buf, "1 2 3\n"); - - dbgfs_set_targets(ctx, NULL, 0); + dbgfs_set_targets(ctx, 0, NULL); sprint_target_ids(ctx, buf, 64); KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); - dbgfs_set_targets(ctx, (unsigned long []){1, 2}, 2); - sprint_target_ids(ctx, buf, 64); - KUNIT_EXPECT_STREQ(test, (char *)buf, "1 2\n"); - - dbgfs_set_targets(ctx, (unsigned long []){2}, 1); + dbgfs_set_targets(ctx, 1, NULL); sprint_target_ids(ctx, buf, 64); - KUNIT_EXPECT_STREQ(test, (char *)buf, "2\n"); + KUNIT_EXPECT_STREQ(test, (char *)buf, "42\n"); - dbgfs_set_targets(ctx, NULL, 0); + dbgfs_set_targets(ctx, 0, NULL); sprint_target_ids(ctx, buf, 64); KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); @@ -112,7 +94,6 @@ static void damon_dbgfs_test_set_targets(struct kunit *test) static void damon_dbgfs_test_set_init_regions(struct kunit *test) { struct damon_ctx *ctx = damon_new_ctx(); - unsigned long ids[] = {1, 2, 3}; /* Each line represents one region in `` `` */ char * const valid_inputs[] = {"1 10 20\n 1 20 30\n1 35 45", "1 10 20\n", @@ -130,7 +111,7 @@ static void damon_dbgfs_test_set_init_regions(struct kunit *test) int i, rc; char buf[256]; - dbgfs_set_targets(ctx, ids, 3); + dbgfs_set_targets(ctx, 3, NULL); /* Put valid inputs and check the results */ for (i = 0; i < ARRAY_SIZE(valid_inputs); i++) { @@ -158,12 +139,12 @@ static void damon_dbgfs_test_set_init_regions(struct kunit *test) KUNIT_EXPECT_STREQ(test, (char *)buf, ""); } - dbgfs_set_targets(ctx, NULL, 0); + dbgfs_set_targets(ctx, 0, NULL); damon_destroy_ctx(ctx); } static struct kunit_case damon_test_cases[] = { - KUNIT_CASE(damon_dbgfs_test_str_to_target_ids), + KUNIT_CASE(damon_dbgfs_test_str_to_ints), KUNIT_CASE(damon_dbgfs_test_set_targets), KUNIT_CASE(damon_dbgfs_test_set_init_regions), {}, diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index 58867b966635..78ff645433c6 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -275,7 +275,7 @@ out: return ret; } -static inline bool 
targetid_is_pid(const struct damon_ctx *ctx) +static inline bool target_has_pid(const struct damon_ctx *ctx) { return ctx->primitive.target_valid == damon_va_target_valid; } @@ -283,17 +283,19 @@ static inline bool targetid_is_pid(const struct damon_ctx *ctx) static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len) { struct damon_target *t; - unsigned long id; + int id; int written = 0; int rc; damon_for_each_target(t, ctx) { - id = t->id; - if (targetid_is_pid(ctx)) + if (target_has_pid(ctx)) /* Show pid numbers to debugfs users */ - id = (unsigned long)pid_vnr((struct pid *)id); + id = pid_vnr(t->pid); + else + /* Show 42 for physical address space, just for fun */ + id = 42; - rc = scnprintf(&buf[written], len - written, "%lu ", id); + rc = scnprintf(&buf[written], len - written, "%d ", id); if (!rc) return -ENOMEM; written += rc; @@ -321,75 +323,114 @@ static ssize_t dbgfs_target_ids_read(struct file *file, } /* - * Converts a string into an array of unsigned long integers + * Converts a string into an array of integers * - * Returns an array of unsigned long integers if the conversion success, or - * NULL otherwise. + * Returns an array of integers if the conversion succeeds, or NULL + * otherwise. */ -static unsigned long *str_to_target_ids(const char *str, ssize_t len, - ssize_t *nr_ids) +static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints) { - unsigned long *ids; - const int max_nr_ids = 32; - unsigned long id; + int *array; + const int max_nr_ints = 32; + int nr; int pos = 0, parsed, ret; - *nr_ids = 0; - ids = kmalloc_array(max_nr_ids, sizeof(id), GFP_KERNEL); - if (!ids) + *nr_ints = 0; + array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL); + if (!array) return NULL; - while (*nr_ids < max_nr_ids && pos < len) { - ret = sscanf(&str[pos], "%lu%n", &id, &parsed); + while (*nr_ints < max_nr_ints && pos < len) { + ret = sscanf(&str[pos], "%d%n", &nr, &parsed); pos += parsed; if (ret != 1) break; - ids[*nr_ids] = id; - *nr_ids += 1; + array[*nr_ints] = nr; + *nr_ints += 1; } - return ids; + return array; } -static void dbgfs_put_pids(unsigned long *ids, int nr_ids) +static void dbgfs_put_pids(struct pid **pids, int nr_pids) { int i; - for (i = 0; i < nr_ids; i++) - put_pid((struct pid *)ids[i]); + for (i = 0; i < nr_pids; i++) + put_pid(pids[i]); +} + +/* + * Converts a string into an array of struct pid pointers + * + * Returns an array of struct pid pointers if the conversion succeeds, or NULL + * otherwise. + */ +static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids) +{ + int *ints; + ssize_t nr_ints; + struct pid **pids; + + *nr_pids = 0; + + ints = str_to_ints(str, len, &nr_ints); + if (!ints) + return NULL; + + pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL); + if (!pids) + goto out; + + for (; *nr_pids < nr_ints; (*nr_pids)++) { + pids[*nr_pids] = find_get_pid(ints[*nr_pids]); + if (!pids[*nr_pids]) { + dbgfs_put_pids(pids, *nr_pids); + kfree(ints); + kfree(pids); + return NULL; + } + } + +out: + kfree(ints); + return pids; } /* * dbgfs_set_targets() - Set monitoring targets. * @ctx: monitoring context - * @ids: array of target ids - * @nr_ids: number of entries in @ids + * @nr_targets: number of targets + * @pids: array of target pids (size is the same as @nr_targets) * - * This function should not be called while the kdamond is running. + * This function should not be called while the kdamond is running. @pids is + * ignored if the context is not configured to have a pid in each target.
On + * failure, reference counts of all pids in @pids are decremented. * * Return: 0 on success, negative error code otherwise. */ -static int dbgfs_set_targets(struct damon_ctx *ctx, - unsigned long *ids, ssize_t nr_ids) +static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets, + struct pid **pids) { ssize_t i; struct damon_target *t, *next; damon_for_each_target_safe(t, next, ctx) { - if (targetid_is_pid(ctx)) - put_pid((struct pid *)t->id); + if (target_has_pid(ctx)) + put_pid(t->pid); damon_destroy_target(t); } - for (i = 0; i < nr_ids; i++) { - t = damon_new_target(ids[i]); + for (i = 0; i < nr_targets; i++) { + t = damon_new_target(); if (!t) { - /* The caller should do cleanup of the ids itself */ damon_for_each_target_safe(t, next, ctx) damon_destroy_target(t); - if (targetid_is_pid(ctx)) - dbgfs_put_pids(ids, nr_ids); + if (target_has_pid(ctx)) + dbgfs_put_pids(pids, nr_targets); return -ENOMEM; } + if (target_has_pid(ctx)) + t->pid = pids[i]; damon_add_target(ctx, t); } @@ -402,10 +443,9 @@ static ssize_t dbgfs_target_ids_write(struct file *file, struct damon_ctx *ctx = file->private_data; bool id_is_pid = true; char *kbuf; - unsigned long *targets; + struct pid **target_pids = NULL; ssize_t nr_targets; ssize_t ret; - int i; kbuf = user_input_str(buf, count, ppos); if (IS_ERR(kbuf)) @@ -413,38 +453,27 @@ static ssize_t dbgfs_target_ids_write(struct file *file, if (!strncmp(kbuf, "paddr\n", count)) { id_is_pid = false; - /* target id is meaningless here, but we set it just for fun */ - scnprintf(kbuf, count, "42 "); - } - - targets = str_to_target_ids(kbuf, count, &nr_targets); - if (!targets) { - ret = -ENOMEM; - goto out; + nr_targets = 1; } if (id_is_pid) { - for (i = 0; i < nr_targets; i++) { - targets[i] = (unsigned long)find_get_pid( - (int)targets[i]); - if (!targets[i]) { - dbgfs_put_pids(targets, i); - ret = -EINVAL; - goto free_targets_out; - } + target_pids = str_to_pids(kbuf, count, &nr_targets); + if (!target_pids) { + ret = -ENOMEM; + goto out; } } mutex_lock(&ctx->kdamond_lock); if (ctx->kdamond) { if (id_is_pid) - dbgfs_put_pids(targets, nr_targets); + dbgfs_put_pids(target_pids, nr_targets); ret = -EBUSY; goto unlock_out; } /* remove previously set targets */ - dbgfs_set_targets(ctx, NULL, 0); + dbgfs_set_targets(ctx, 0, NULL); /* Configure the context for the address space type */ if (id_is_pid) @@ -452,14 +481,13 @@ static ssize_t dbgfs_target_ids_write(struct file *file, else damon_pa_set_primitives(ctx); - ret = dbgfs_set_targets(ctx, targets, nr_targets); + ret = dbgfs_set_targets(ctx, nr_targets, target_pids); if (!ret) ret = count; unlock_out: mutex_unlock(&ctx->kdamond_lock); -free_targets_out: - kfree(targets); + kfree(target_pids); out: kfree(kbuf); return ret; @@ -688,12 +716,12 @@ static void dbgfs_before_terminate(struct damon_ctx *ctx) { struct damon_target *t, *next; - if (!targetid_is_pid(ctx)) + if (!target_has_pid(ctx)) return; mutex_lock(&ctx->kdamond_lock); damon_for_each_target_safe(t, next, ctx) { - put_pid((struct pid *)t->id); + put_pid(t->pid); damon_destroy_target(t); } mutex_unlock(&ctx->kdamond_lock); diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c index bc476cef688e..29da37192e4a 100644 --- a/mm/damon/reclaim.c +++ b/mm/damon/reclaim.c @@ -387,8 +387,7 @@ static int __init damon_reclaim_init(void) damon_pa_set_primitives(ctx); ctx->callback.after_aggregation = damon_reclaim_after_aggregation; - /* 4242 means nothing but fun */ - target = damon_new_target(4242); + target = damon_new_target(); if (!target) { 
damon_destroy_ctx(ctx); return -ENOMEM; diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h index 6a1b9272ea12..f0d0ba591792 100644 --- a/mm/damon/vaddr-test.h +++ b/mm/damon/vaddr-test.h @@ -139,7 +139,7 @@ static void damon_do_test_apply_three_regions(struct kunit *test, struct damon_region *r; int i; - t = damon_new_target(42); + t = damon_new_target(); for (i = 0; i < nr_regions / 2; i++) { r = damon_new_region(regions[i * 2], regions[i * 2 + 1]); damon_add_region(r, t); @@ -251,7 +251,7 @@ static void damon_test_apply_three_regions4(struct kunit *test) static void damon_test_split_evenly_fail(struct kunit *test, unsigned long start, unsigned long end, unsigned int nr_pieces) { - struct damon_target *t = damon_new_target(42); + struct damon_target *t = damon_new_target(); struct damon_region *r = damon_new_region(start, end); damon_add_region(r, t); @@ -270,7 +270,7 @@ static void damon_test_split_evenly_fail(struct kunit *test, static void damon_test_split_evenly_succ(struct kunit *test, unsigned long start, unsigned long end, unsigned int nr_pieces) { - struct damon_target *t = damon_new_target(42); + struct damon_target *t = damon_new_target(); struct damon_region *r = damon_new_region(start, end); unsigned long expected_width = (end - start) / nr_pieces; unsigned long i = 0; diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 89b6468da2b9..f98edb90a873 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -23,12 +23,12 @@ #endif /* - * 't->id' should be the pointer to the relevant 'struct pid' having reference + * 't->pid' should be the pointer to the relevant 'struct pid' having reference * count. Caller must put the returned task, unless it is NULL. */ static inline struct task_struct *damon_get_task_struct(struct damon_target *t) { - return get_pid_task((struct pid *)t->id, PIDTYPE_PID); + return get_pid_task(t->pid, PIDTYPE_PID); } /* -- cgit v1.2.3-71-gd317 From f7d911c39cbbb88d625216a0cfd0517a3047c46e Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 22 Mar 2022 14:48:46 -0700 Subject: mm/damon: rename damon_primitives to damon_operations Patch series "Allow DAMON user code independent of monitoring primitives". In-kernel DAMON user code is required to configure the monitoring context (struct damon_ctx) with proper monitoring primitives (struct damon_primitive). This makes the user code dependent on all supporting monitoring primitives. For example, DAMON debugfs interface depends on both DAMON_VADDR and DAMON_PADDR, though some users are interested in only one use case. As more monitoring primitives are introduced, the problem will get bigger. To minimize such unnecessary dependency, this patchset allows monitoring primitives to be registered by the implementing code and later dynamically searched and selected by the user code. In addition to that, this patchset renames monitoring primitives to monitoring operations, which makes it easier to intuitively understand what they mean and how they are structured. This patch (of 8): DAMON has a set of callback functions called monitoring primitives and lets it be configured with various implementations for easy extension for different address spaces and usages. However, the word 'primitive' is not so explicit. Meanwhile, many other structs serving similar purposes call themselves 'operations'. To make the code easier to understand, this commit renames 'damon_primitives' to 'damon_operations' before it is too late to rename.
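The registration-and-lookup scheme the cover letter describes follows the common ops-table pattern. A minimal sketch under illustrative names (monitor_ops, register_monitor_ops and find_monitor_ops are not the DAMON API; the actual interface arrives later in the series): backends fill in a struct of callbacks and register it once, and user code selects one by identifier instead of hard-coding a backend dependency.

#include <string.h>

struct monitor_ops {
	const char *id;
	void (*init)(void *ctx);
	void (*check_accesses)(void *ctx);
};

#define MAX_MONITOR_OPS 8
static struct monitor_ops ops_registry[MAX_MONITOR_OPS];
static int nr_registered_ops;

/* Backends register their callback table once at init time ... */
static int register_monitor_ops(const struct monitor_ops *ops)
{
	if (nr_registered_ops == MAX_MONITOR_OPS)
		return -1;
	ops_registry[nr_registered_ops++] = *ops;
	return 0;
}

/* ... and user code looks one up by id, with no compile-time dependency. */
static const struct monitor_ops *find_monitor_ops(const char *id)
{
	int i;

	for (i = 0; i < nr_registered_ops; i++)
		if (!strcmp(ops_registry[i].id, id))
			return &ops_registry[i];
	return NULL;
}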
Link: https://lkml.kernel.org/r/20220215184603.1479-1-sj@kernel.org Link: https://lkml.kernel.org/r/20220215184603.1479-2-sj@kernel.org Signed-off-by: SeongJae Park Cc: Xin Hao Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/damon.h | 48 ++++++++--------- mm/damon/Kconfig | 12 ++--- mm/damon/Makefile | 4 +- mm/damon/core.c | 65 +++++++++++------------ mm/damon/dbgfs-test.h | 2 +- mm/damon/dbgfs.c | 10 ++-- mm/damon/ops-common.c | 133 ++++++++++++++++++++++++++++++++++++++++++++++++ mm/damon/ops-common.h | 16 ++++++ mm/damon/paddr.c | 22 ++++---- mm/damon/prmtv-common.c | 133 ------------------------------------------------ mm/damon/prmtv-common.h | 16 ------ mm/damon/reclaim.c | 2 +- mm/damon/vaddr-test.h | 2 +- mm/damon/vaddr.c | 22 ++++---- 14 files changed, 244 insertions(+), 243 deletions(-) create mode 100644 mm/damon/ops-common.c create mode 100644 mm/damon/ops-common.h delete mode 100644 mm/damon/prmtv-common.c delete mode 100644 mm/damon/prmtv-common.h (limited to 'include/linux') diff --git a/include/linux/damon.h b/include/linux/damon.h index 7c1d915b3587..00baeb42c18e 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -67,8 +67,8 @@ struct damon_region { * * Each monitoring context could have multiple targets. For example, a context * for virtual memory address spaces could have multiple target processes. The - * @pid should be set for appropriate address space monitoring primitives - * including the virtual address spaces monitoring primitives. + * @pid should be set for appropriate &struct damon_operations including the + * virtual address spaces monitoring operations. */ struct damon_target { struct pid *pid; @@ -120,9 +120,9 @@ enum damos_action { * uses smaller one as the effective quota. * * For selecting regions within the quota, DAMON prioritizes current scheme's - * target memory regions using the &struct damon_primitive->get_scheme_score. + * target memory regions using the &struct damon_operations->get_scheme_score. * You could customize the prioritization logic by setting &weight_sz, - * &weight_nr_accesses, and &weight_age, because monitoring primitives are + * &weight_nr_accesses, and &weight_age, because monitoring operations are * encouraged to respect those. */ struct damos_quota { @@ -256,10 +256,10 @@ struct damos { struct damon_ctx; /** - * struct damon_primitive - Monitoring primitives for given use cases. + * struct damon_operations - Monitoring operations for given use cases. * - * @init: Initialize primitive-internal data structures. - * @update: Update primitive-internal data structures. + * @init: Initialize operations-related data structures. + * @update: Update operations-related data structures. * @prepare_access_checks: Prepare next access check of target regions. * @check_accesses: Check the accesses to target regions. * @reset_aggregated: Reset aggregated accesses monitoring results. @@ -269,18 +269,18 @@ struct damon_ctx; * @cleanup: Clean up the context. * * DAMON can be extended for various address spaces and usages. For this, - * users should register the low level primitives for their target address - * space and usecase via the &damon_ctx.primitive. Then, the monitoring thread + * users should register the low level operations for their target address + * space and usecase via the &damon_ctx.ops. 
Then, the monitoring thread * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting - * the monitoring, @update after each &damon_ctx.primitive_update_interval, and + * the monitoring, @update after each &damon_ctx.ops_update_interval, and * @check_accesses, @target_valid and @prepare_access_checks after each * &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each * &damon_ctx.aggr_interval. * - * @init should initialize primitive-internal data structures. For example, + * @init should initialize operations-related data structures. For example, * this could be used to construct proper monitoring target regions and link * those to @damon_ctx.adaptive_targets. - * @update should update the primitive-internal data structures. For example, + * @update should update the operations-related data structures. For example, * this could be used to update monitoring target regions for current status. * @prepare_access_checks should manipulate the monitoring regions to be * prepared for the next access check. @@ -300,7 +300,7 @@ struct damon_ctx; * monitoring. * @cleanup is called from @kdamond just before its termination. */ -struct damon_primitive { +struct damon_operations { void (*init)(struct damon_ctx *context); void (*update)(struct damon_ctx *context); void (*prepare_access_checks)(struct damon_ctx *context); @@ -354,15 +354,15 @@ struct damon_callback { * * @sample_interval: The time between access samplings. * @aggr_interval: The time between monitor results aggregations. - * @primitive_update_interval: The time between monitoring primitive updates. + * @ops_update_interval: The time between monitoring operations updates. * * For each @sample_interval, DAMON checks whether each region is accessed or * not. It aggregates and keeps the access information (number of accesses to * each region) for @aggr_interval time. DAMON also checks whether the target * memory regions need update (e.g., by ``mmap()`` calls from the application, * in case of virtual memory monitoring) and applies the changes for each - * @primitive_update_interval. All time intervals are in micro-seconds. - * Please refer to &struct damon_primitive and &struct damon_callback for more + * @ops_update_interval. All time intervals are in micro-seconds. + * Please refer to &struct damon_operations and &struct damon_callback for more * detail. * * @kdamond: Kernel thread who does the monitoring. @@ -374,7 +374,7 @@ struct damon_callback { * * Once started, the monitoring thread runs until explicitly required to be * terminated or every monitoring target is invalid. The validity of the - * targets is checked via the &damon_primitive.target_valid of @primitive. The + * targets is checked via the &damon_operations.target_valid of @ops. The * termination can also be explicitly requested by writing non-zero to * @kdamond_stop. The thread sets @kdamond to NULL when it terminates. * Therefore, users can know whether the monitoring is ongoing or terminated by @@ -384,7 +384,7 @@ struct damon_callback { * Note that the monitoring thread protects only @kdamond and @kdamond_stop via * @kdamond_lock. Accesses to other fields must be protected by themselves. * - * @primitive: Set of monitoring primitives for given use cases. + * @ops: Set of monitoring operations for given use cases. * @callback: Set of callbacks for monitoring events notifications. * * @min_nr_regions: The minimum number of adaptive monitoring regions. 
@@ -395,17 +395,17 @@ struct damon_callback { struct damon_ctx { unsigned long sample_interval; unsigned long aggr_interval; - unsigned long primitive_update_interval; + unsigned long ops_update_interval; /* private: internal use only */ struct timespec64 last_aggregation; - struct timespec64 last_primitive_update; + struct timespec64 last_ops_update; /* public: */ struct task_struct *kdamond; struct mutex kdamond_lock; - struct damon_primitive primitive; + struct damon_operations ops; struct damon_callback callback; unsigned long min_nr_regions; @@ -484,7 +484,7 @@ unsigned int damon_nr_regions(struct damon_target *t); struct damon_ctx *damon_new_ctx(void); void damon_destroy_ctx(struct damon_ctx *ctx); int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, - unsigned long aggr_int, unsigned long primitive_upd_int, + unsigned long aggr_int, unsigned long ops_upd_int, unsigned long min_nr_reg, unsigned long max_nr_reg); int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, ssize_t nr_schemes); @@ -497,12 +497,12 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); #ifdef CONFIG_DAMON_VADDR bool damon_va_target_valid(void *t); -void damon_va_set_primitives(struct damon_ctx *ctx); +void damon_va_set_operations(struct damon_ctx *ctx); #endif /* CONFIG_DAMON_VADDR */ #ifdef CONFIG_DAMON_PADDR bool damon_pa_target_valid(void *t); -void damon_pa_set_primitives(struct damon_ctx *ctx); +void damon_pa_set_operations(struct damon_ctx *ctx); #endif /* CONFIG_DAMON_PADDR */ #endif /* _DAMON_H */ diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig index 5bcf05851ad0..01bad77ad7ae 100644 --- a/mm/damon/Kconfig +++ b/mm/damon/Kconfig @@ -25,27 +25,27 @@ config DAMON_KUNIT_TEST If unsure, say N. config DAMON_VADDR - bool "Data access monitoring primitives for virtual address spaces" + bool "Data access monitoring operations for virtual address spaces" depends on DAMON && MMU select PAGE_IDLE_FLAG help - This builds the default data access monitoring primitives for DAMON + This builds the default data access monitoring operations for DAMON that work for virtual address spaces. config DAMON_PADDR - bool "Data access monitoring primitives for the physical address space" + bool "Data access monitoring operations for the physical address space" depends on DAMON && MMU select PAGE_IDLE_FLAG help - This builds the default data access monitoring primitives for DAMON + This builds the default data access monitoring operations for DAMON that works for the physical address space. config DAMON_VADDR_KUNIT_TEST - bool "Test for DAMON primitives" if !KUNIT_ALL_TESTS + bool "Test for DAMON operations" if !KUNIT_ALL_TESTS depends on DAMON_VADDR && KUNIT=y default KUNIT_ALL_TESTS help - This builds the DAMON virtual addresses primitives Kunit test suite. + This builds the DAMON virtual addresses operations Kunit test suite. For more information on KUnit and unit tests in general, please refer to the KUnit documentation. 
diff --git a/mm/damon/Makefile b/mm/damon/Makefile index f7d5ac377a2b..03931472991a 100644 --- a/mm/damon/Makefile +++ b/mm/damon/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DAMON) := core.o -obj-$(CONFIG_DAMON_VADDR) += prmtv-common.o vaddr.o -obj-$(CONFIG_DAMON_PADDR) += prmtv-common.o paddr.o +obj-$(CONFIG_DAMON_VADDR) += ops-common.o vaddr.o +obj-$(CONFIG_DAMON_PADDR) += ops-common.o paddr.o obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o obj-$(CONFIG_DAMON_RECLAIM) += reclaim.o diff --git a/mm/damon/core.c b/mm/damon/core.c index bf495236d741..be93fb1c3473 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -204,10 +204,10 @@ struct damon_ctx *damon_new_ctx(void) ctx->sample_interval = 5 * 1000; ctx->aggr_interval = 100 * 1000; - ctx->primitive_update_interval = 60 * 1000 * 1000; + ctx->ops_update_interval = 60 * 1000 * 1000; ktime_get_coarse_ts64(&ctx->last_aggregation); - ctx->last_primitive_update = ctx->last_aggregation; + ctx->last_ops_update = ctx->last_aggregation; mutex_init(&ctx->kdamond_lock); @@ -224,8 +224,8 @@ static void damon_destroy_targets(struct damon_ctx *ctx) { struct damon_target *t, *next_t; - if (ctx->primitive.cleanup) { - ctx->primitive.cleanup(ctx); + if (ctx->ops.cleanup) { + ctx->ops.cleanup(ctx); return; } @@ -250,7 +250,7 @@ void damon_destroy_ctx(struct damon_ctx *ctx) * @ctx: monitoring context * @sample_int: time interval between samplings * @aggr_int: time interval between aggregations - * @primitive_upd_int: time interval between monitoring primitive updates + * @ops_upd_int: time interval between monitoring operations updates * @min_nr_reg: minimal number of regions * @max_nr_reg: maximum number of regions * @@ -260,7 +260,7 @@ void damon_destroy_ctx(struct damon_ctx *ctx) * Return: 0 on success, negative error code otherwise. 
*/ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, - unsigned long aggr_int, unsigned long primitive_upd_int, + unsigned long aggr_int, unsigned long ops_upd_int, unsigned long min_nr_reg, unsigned long max_nr_reg) { if (min_nr_reg < 3) @@ -270,7 +270,7 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, ctx->sample_interval = sample_int; ctx->aggr_interval = aggr_int; - ctx->primitive_update_interval = primitive_upd_int; + ctx->ops_update_interval = ops_upd_int; ctx->min_nr_regions = min_nr_reg; ctx->max_nr_regions = max_nr_reg; @@ -516,10 +516,10 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, { bool ret = __damos_valid_target(r, s); - if (!ret || !s->quota.esz || !c->primitive.get_scheme_score) + if (!ret || !s->quota.esz || !c->ops.get_scheme_score) return ret; - return c->primitive.get_scheme_score(c, t, r, s) >= s->quota.min_score; + return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score; } static void damon_do_apply_schemes(struct damon_ctx *c, @@ -576,7 +576,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c, continue; /* Apply the scheme */ - if (c->primitive.apply_scheme) { + if (c->ops.apply_scheme) { if (quota->esz && quota->charged_sz + sz > quota->esz) { sz = ALIGN_DOWN(quota->esz - quota->charged_sz, @@ -586,7 +586,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c, damon_split_region_at(c, t, r, sz); } ktime_get_coarse_ts64(&begin); - sz_applied = c->primitive.apply_scheme(c, t, r, s); + sz_applied = c->ops.apply_scheme(c, t, r, s); ktime_get_coarse_ts64(&end); quota->total_charged_ns += timespec64_to_ns(&end) - timespec64_to_ns(&begin); @@ -660,7 +660,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c) damos_set_effective_quota(quota); } - if (!c->primitive.get_scheme_score) + if (!c->ops.get_scheme_score) continue; /* Fill up the score histogram */ @@ -669,7 +669,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c) damon_for_each_region(r, t) { if (!__damos_valid_target(r, s)) continue; - score = c->primitive.get_scheme_score( + score = c->ops.get_scheme_score( c, t, r, s); quota->histogram[score] += r->ar.end - r->ar.start; @@ -848,14 +848,15 @@ static void kdamond_split_regions(struct damon_ctx *ctx) } /* - * Check whether it is time to check and apply the target monitoring regions + * Check whether it is time to check and apply the operations-related data + * structures. * * Returns true if it is. 
*/ -static bool kdamond_need_update_primitive(struct damon_ctx *ctx) +static bool kdamond_need_update_operations(struct damon_ctx *ctx) { - return damon_check_reset_time_interval(&ctx->last_primitive_update, - ctx->primitive_update_interval); + return damon_check_reset_time_interval(&ctx->last_ops_update, + ctx->ops_update_interval); } /* @@ -873,11 +874,11 @@ static bool kdamond_need_stop(struct damon_ctx *ctx) if (kthread_should_stop()) return true; - if (!ctx->primitive.target_valid) + if (!ctx->ops.target_valid) return false; damon_for_each_target(t, ctx) { - if (ctx->primitive.target_valid(t)) + if (ctx->ops.target_valid(t)) return false; } @@ -976,8 +977,8 @@ static int kdamond_fn(void *data) pr_debug("kdamond (%d) starts\n", current->pid); - if (ctx->primitive.init) - ctx->primitive.init(ctx); + if (ctx->ops.init) + ctx->ops.init(ctx); if (ctx->callback.before_start && ctx->callback.before_start(ctx)) done = true; @@ -987,16 +988,16 @@ static int kdamond_fn(void *data) if (kdamond_wait_activation(ctx)) continue; - if (ctx->primitive.prepare_access_checks) - ctx->primitive.prepare_access_checks(ctx); + if (ctx->ops.prepare_access_checks) + ctx->ops.prepare_access_checks(ctx); if (ctx->callback.after_sampling && ctx->callback.after_sampling(ctx)) done = true; kdamond_usleep(ctx->sample_interval); - if (ctx->primitive.check_accesses) - max_nr_accesses = ctx->primitive.check_accesses(ctx); + if (ctx->ops.check_accesses) + max_nr_accesses = ctx->ops.check_accesses(ctx); if (kdamond_aggregate_interval_passed(ctx)) { kdamond_merge_regions(ctx, @@ -1008,13 +1009,13 @@ static int kdamond_fn(void *data) kdamond_apply_schemes(ctx); kdamond_reset_aggregated(ctx); kdamond_split_regions(ctx); - if (ctx->primitive.reset_aggregated) - ctx->primitive.reset_aggregated(ctx); + if (ctx->ops.reset_aggregated) + ctx->ops.reset_aggregated(ctx); } - if (kdamond_need_update_primitive(ctx)) { - if (ctx->primitive.update) - ctx->primitive.update(ctx); + if (kdamond_need_update_operations(ctx)) { + if (ctx->ops.update) + ctx->ops.update(ctx); sz_limit = damon_region_sz_limit(ctx); } } @@ -1025,8 +1026,8 @@ static int kdamond_fn(void *data) if (ctx->callback.before_terminate) ctx->callback.before_terminate(ctx); - if (ctx->primitive.cleanup) - ctx->primitive.cleanup(ctx); + if (ctx->ops.cleanup) + ctx->ops.cleanup(ctx); pr_debug("kdamond (%d) finishes\n", current->pid); mutex_lock(&ctx->kdamond_lock); diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h index 0d3a14c00acf..8f7f32595055 100644 --- a/mm/damon/dbgfs-test.h +++ b/mm/damon/dbgfs-test.h @@ -74,7 +74,7 @@ static void damon_dbgfs_test_set_targets(struct kunit *test) char buf[64]; /* Make DAMON consider target has no pid */ - ctx->primitive = (struct damon_primitive){}; + ctx->ops = (struct damon_operations){}; dbgfs_set_targets(ctx, 0, NULL); sprint_target_ids(ctx, buf, 64); diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index 78ff645433c6..719278a8cc5e 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -56,7 +56,7 @@ static ssize_t dbgfs_attrs_read(struct file *file, mutex_lock(&ctx->kdamond_lock); ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n", ctx->sample_interval, ctx->aggr_interval, - ctx->primitive_update_interval, ctx->min_nr_regions, + ctx->ops_update_interval, ctx->min_nr_regions, ctx->max_nr_regions); mutex_unlock(&ctx->kdamond_lock); @@ -277,7 +277,7 @@ out: static inline bool target_has_pid(const struct damon_ctx *ctx) { - return ctx->primitive.target_valid == damon_va_target_valid; + return 
ctx->ops.target_valid == damon_va_target_valid; } static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len) @@ -477,9 +477,9 @@ static ssize_t dbgfs_target_ids_write(struct file *file, /* Configure the context for the address space type */ if (id_is_pid) - damon_va_set_primitives(ctx); + damon_va_set_operations(ctx); else - damon_pa_set_primitives(ctx); + damon_pa_set_operations(ctx); ret = dbgfs_set_targets(ctx, nr_targets, target_pids); if (!ret) @@ -735,7 +735,7 @@ static struct damon_ctx *dbgfs_new_ctx(void) if (!ctx) return NULL; - damon_va_set_primitives(ctx); + damon_va_set_operations(ctx); ctx->callback.before_terminate = dbgfs_before_terminate; return ctx; } diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c new file mode 100644 index 000000000000..e346cc10d143 --- /dev/null +++ b/mm/damon/ops-common.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Common Primitives for Data Access Monitoring + * + * Author: SeongJae Park + */ + +#include +#include +#include +#include + +#include "ops-common.h" + +/* + * Get an online page for a pfn if it's in the LRU list. Otherwise, returns + * NULL. + * + * The body of this function is stolen from the 'page_idle_get_page()'. We + * steal rather than reuse it because the code is quite simple. + */ +struct page *damon_get_page(unsigned long pfn) +{ + struct page *page = pfn_to_online_page(pfn); + + if (!page || !PageLRU(page) || !get_page_unless_zero(page)) + return NULL; + + if (unlikely(!PageLRU(page))) { + put_page(page); + page = NULL; + } + return page; +} + +void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr) +{ + bool referenced = false; + struct page *page = damon_get_page(pte_pfn(*pte)); + + if (!page) + return; + + if (pte_young(*pte)) { + referenced = true; + *pte = pte_mkold(*pte); + } + +#ifdef CONFIG_MMU_NOTIFIER + if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE)) + referenced = true; +#endif /* CONFIG_MMU_NOTIFIER */ + + if (referenced) + set_page_young(page); + + set_page_idle(page); + put_page(page); +} + +void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + bool referenced = false; + struct page *page = damon_get_page(pmd_pfn(*pmd)); + + if (!page) + return; + + if (pmd_young(*pmd)) { + referenced = true; + *pmd = pmd_mkold(*pmd); + } + +#ifdef CONFIG_MMU_NOTIFIER + if (mmu_notifier_clear_young(mm, addr, + addr + ((1UL) << HPAGE_PMD_SHIFT))) + referenced = true; +#endif /* CONFIG_MMU_NOTIFIER */ + + if (referenced) + set_page_young(page); + + set_page_idle(page); + put_page(page); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +} + +#define DAMON_MAX_SUBSCORE (100) +#define DAMON_MAX_AGE_IN_LOG (32) + +int damon_pageout_score(struct damon_ctx *c, struct damon_region *r, + struct damos *s) +{ + unsigned int max_nr_accesses; + int freq_subscore; + unsigned int age_in_sec; + int age_in_log, age_subscore; + unsigned int freq_weight = s->quota.weight_nr_accesses; + unsigned int age_weight = s->quota.weight_age; + int hotness; + + max_nr_accesses = c->aggr_interval / c->sample_interval; + freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses; + + age_in_sec = (unsigned long)r->age * c->aggr_interval / 1000000; + for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec; + age_in_log++, age_in_sec >>= 1) + ; + + /* If frequency is 0, higher age means it's colder */ + if (freq_subscore == 0) + age_in_log *= -1; + + /* + * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, 
DAMON_MAX_AGE_IN_LOG]. + * Scale it to be in [0, 100] and set it as age subscore. + */ + age_in_log += DAMON_MAX_AGE_IN_LOG; + age_subscore = age_in_log * DAMON_MAX_SUBSCORE / + DAMON_MAX_AGE_IN_LOG / 2; + + hotness = (freq_weight * freq_subscore + age_weight * age_subscore); + if (freq_weight + age_weight) + hotness /= freq_weight + age_weight; + /* + * Transform it to fit in [0, DAMOS_MAX_SCORE] + */ + hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE; + + /* Return coldness of the region */ + return DAMOS_MAX_SCORE - hotness; +} diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h new file mode 100644 index 000000000000..e790cb5f8fe0 --- /dev/null +++ b/mm/damon/ops-common.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common Primitives for Data Access Monitoring + * + * Author: SeongJae Park + */ + +#include + +struct page *damon_get_page(unsigned long pfn); + +void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr); +void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr); + +int damon_pageout_score(struct damon_ctx *c, struct damon_region *r, + struct damos *s); diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 5e8244f65a1a..9f0abd0369bc 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -14,7 +14,7 @@ #include #include "../internal.h" -#include "prmtv-common.h" +#include "ops-common.h" static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *arg) @@ -261,15 +261,15 @@ static int damon_pa_scheme_score(struct damon_ctx *context, return DAMOS_MAX_SCORE; } -void damon_pa_set_primitives(struct damon_ctx *ctx) +void damon_pa_set_operations(struct damon_ctx *ctx) { - ctx->primitive.init = NULL; - ctx->primitive.update = NULL; - ctx->primitive.prepare_access_checks = damon_pa_prepare_access_checks; - ctx->primitive.check_accesses = damon_pa_check_accesses; - ctx->primitive.reset_aggregated = NULL; - ctx->primitive.target_valid = damon_pa_target_valid; - ctx->primitive.cleanup = NULL; - ctx->primitive.apply_scheme = damon_pa_apply_scheme; - ctx->primitive.get_scheme_score = damon_pa_scheme_score; + ctx->ops.init = NULL; + ctx->ops.update = NULL; + ctx->ops.prepare_access_checks = damon_pa_prepare_access_checks; + ctx->ops.check_accesses = damon_pa_check_accesses; + ctx->ops.reset_aggregated = NULL; + ctx->ops.target_valid = damon_pa_target_valid; + ctx->ops.cleanup = NULL; + ctx->ops.apply_scheme = damon_pa_apply_scheme; + ctx->ops.get_scheme_score = damon_pa_scheme_score; } diff --git a/mm/damon/prmtv-common.c b/mm/damon/prmtv-common.c deleted file mode 100644 index 92a04f5831d6..000000000000 --- a/mm/damon/prmtv-common.c +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Common Primitives for Data Access Monitoring - * - * Author: SeongJae Park - */ - -#include -#include -#include -#include - -#include "prmtv-common.h" - -/* - * Get an online page for a pfn if it's in the LRU list. Otherwise, returns - * NULL. - * - * The body of this function is stolen from the 'page_idle_get_page()'. We - * steal rather than reuse it because the code is quite simple. 
- */ -struct page *damon_get_page(unsigned long pfn) -{ - struct page *page = pfn_to_online_page(pfn); - - if (!page || !PageLRU(page) || !get_page_unless_zero(page)) - return NULL; - - if (unlikely(!PageLRU(page))) { - put_page(page); - page = NULL; - } - return page; -} - -void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr) -{ - bool referenced = false; - struct page *page = damon_get_page(pte_pfn(*pte)); - - if (!page) - return; - - if (pte_young(*pte)) { - referenced = true; - *pte = pte_mkold(*pte); - } - -#ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE)) - referenced = true; -#endif /* CONFIG_MMU_NOTIFIER */ - - if (referenced) - set_page_young(page); - - set_page_idle(page); - put_page(page); -} - -void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr) -{ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - bool referenced = false; - struct page *page = damon_get_page(pmd_pfn(*pmd)); - - if (!page) - return; - - if (pmd_young(*pmd)) { - referenced = true; - *pmd = pmd_mkold(*pmd); - } - -#ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(mm, addr, - addr + ((1UL) << HPAGE_PMD_SHIFT))) - referenced = true; -#endif /* CONFIG_MMU_NOTIFIER */ - - if (referenced) - set_page_young(page); - - set_page_idle(page); - put_page(page); -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -} - -#define DAMON_MAX_SUBSCORE (100) -#define DAMON_MAX_AGE_IN_LOG (32) - -int damon_pageout_score(struct damon_ctx *c, struct damon_region *r, - struct damos *s) -{ - unsigned int max_nr_accesses; - int freq_subscore; - unsigned int age_in_sec; - int age_in_log, age_subscore; - unsigned int freq_weight = s->quota.weight_nr_accesses; - unsigned int age_weight = s->quota.weight_age; - int hotness; - - max_nr_accesses = c->aggr_interval / c->sample_interval; - freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses; - - age_in_sec = (unsigned long)r->age * c->aggr_interval / 1000000; - for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec; - age_in_log++, age_in_sec >>= 1) - ; - - /* If frequency is 0, higher age means it's colder */ - if (freq_subscore == 0) - age_in_log *= -1; - - /* - * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, DAMON_MAX_AGE_IN_LOG]. - * Scale it to be in [0, 100] and set it as age subscore. 
- */ - age_in_log += DAMON_MAX_AGE_IN_LOG; - age_subscore = age_in_log * DAMON_MAX_SUBSCORE / - DAMON_MAX_AGE_IN_LOG / 2; - - hotness = (freq_weight * freq_subscore + age_weight * age_subscore); - if (freq_weight + age_weight) - hotness /= freq_weight + age_weight; - /* - * Transform it to fit in [0, DAMOS_MAX_SCORE] - */ - hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE; - - /* Return coldness of the region */ - return DAMOS_MAX_SCORE - hotness; -} diff --git a/mm/damon/prmtv-common.h b/mm/damon/prmtv-common.h deleted file mode 100644 index e790cb5f8fe0..000000000000 --- a/mm/damon/prmtv-common.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Common Primitives for Data Access Monitoring - * - * Author: SeongJae Park - */ - -#include - -struct page *damon_get_page(unsigned long pfn); - -void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr); -void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr); - -int damon_pageout_score(struct damon_ctx *c, struct damon_region *r, - struct damos *s); diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c index 29da37192e4a..3c93095c793c 100644 --- a/mm/damon/reclaim.c +++ b/mm/damon/reclaim.c @@ -384,7 +384,7 @@ static int __init damon_reclaim_init(void) if (!ctx) return -ENOMEM; - damon_pa_set_primitives(ctx); + damon_pa_set_operations(ctx); ctx->callback.after_aggregation = damon_reclaim_after_aggregation; target = damon_new_target(); diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h index f0d0ba591792..1a55bb6c36c3 100644 --- a/mm/damon/vaddr-test.h +++ b/mm/damon/vaddr-test.h @@ -314,7 +314,7 @@ static struct kunit_case damon_test_cases[] = { }; static struct kunit_suite damon_test_suite = { - .name = "damon-primitives", + .name = "damon-operations", .test_cases = damon_test_cases, }; kunit_test_suite(damon_test_suite); diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 6d3454dd3204..c0eb32025f9b 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -15,7 +15,7 @@ #include #include -#include "prmtv-common.h" +#include "ops-common.h" #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST #undef DAMON_MIN_REGION @@ -739,17 +739,17 @@ static int damon_va_scheme_score(struct damon_ctx *context, return DAMOS_MAX_SCORE; } -void damon_va_set_primitives(struct damon_ctx *ctx) +void damon_va_set_operations(struct damon_ctx *ctx) { - ctx->primitive.init = damon_va_init; - ctx->primitive.update = damon_va_update; - ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks; - ctx->primitive.check_accesses = damon_va_check_accesses; - ctx->primitive.reset_aggregated = NULL; - ctx->primitive.target_valid = damon_va_target_valid; - ctx->primitive.cleanup = NULL; - ctx->primitive.apply_scheme = damon_va_apply_scheme; - ctx->primitive.get_scheme_score = damon_va_scheme_score; + ctx->ops.init = damon_va_init; + ctx->ops.update = damon_va_update; + ctx->ops.prepare_access_checks = damon_va_prepare_access_checks; + ctx->ops.check_accesses = damon_va_check_accesses; + ctx->ops.reset_aggregated = NULL; + ctx->ops.target_valid = damon_va_target_valid; + ctx->ops.cleanup = NULL; + ctx->ops.apply_scheme = damon_va_apply_scheme; + ctx->ops.get_scheme_score = damon_va_scheme_score; } #include "vaddr-test.h" -- cgit v1.2.3-71-gd317 From 9f7b053a0f6121f89e00d1688bfca0bf278caa25 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 22 Mar 2022 14:48:49 -0700 Subject: mm/damon: let monitoring operations can be registered and selected In-kernel DAMON user code like DAMON debugfs interface 
should set 'struct damon_operations' of its 'struct damon_ctx' on its own. Therefore, the client code should depend on every monitoring operations implementation that it could use. For example, the DAMON debugfs interface depends on both vaddr and paddr, while some of its users are not always interested in both. To minimize such unnecessary dependencies, this commit makes it possible for monitoring operations to be registered by their implementing code and then dynamically selected by user code, without a build-time dependency. Link: https://lkml.kernel.org/r/20220215184603.1479-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: David Rientjes Cc: Xin Hao Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/damon.h | 18 ++++++++++++++ mm/damon/core.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) (limited to 'include/linux') diff --git a/include/linux/damon.h b/include/linux/damon.h index 00baeb42c18e..076da277b249 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -253,11 +253,24 @@ struct damos { struct list_head list; }; +/** + * enum damon_ops_id - Identifier for each monitoring operations implementation + * + * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces + * @DAMON_OPS_PADDR: Monitoring operations for the physical address space + */ +enum damon_ops_id { + DAMON_OPS_VADDR, + DAMON_OPS_PADDR, + NR_DAMON_OPS, +}; + struct damon_ctx; /** * struct damon_operations - Monitoring operations for given use cases. * + * @id: Identifier of this operations set. * @init: Initialize operations-related data structures. * @update: Update operations-related data structures. * @prepare_access_checks: Prepare next access check of target regions. @@ -277,6 +290,8 @@ struct damon_ctx; * &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each * &damon_ctx.aggr_interval. * + * Each &struct damon_operations instance having valid @id can be registered + * via damon_register_ops() and selected by damon_select_ops() later. * @init should initialize operations-related data structures. For example, * this could be used to construct proper monitoring target regions and link * those to @damon_ctx.adaptive_targets. @@ -301,6 +316,7 @@ struct damon_ctx; * @cleanup is called from @kdamond just before its termination.
*/ struct damon_operations { + enum damon_ops_id id; void (*init)(struct damon_ctx *context); void (*update)(struct damon_ctx *context); void (*prepare_access_checks)(struct damon_ctx *context); @@ -489,6 +505,8 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, ssize_t nr_schemes); int damon_nr_running_ctxs(void); +int damon_register_ops(struct damon_operations *ops); +int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id); int damon_start(struct damon_ctx **ctxs, int nr_ctxs); int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); diff --git a/mm/damon/core.c b/mm/damon/core.c index be93fb1c3473..82e0a4620c4f 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -25,6 +25,72 @@ static DEFINE_MUTEX(damon_lock); static int nr_running_ctxs; +static DEFINE_MUTEX(damon_ops_lock); +static struct damon_operations damon_registered_ops[NR_DAMON_OPS]; + +/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */ +static bool damon_registered_ops_id(enum damon_ops_id id) +{ + struct damon_operations empty_ops = {}; + + if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops))) + return false; + return true; +} + +/** + * damon_register_ops() - Register a monitoring operations set to DAMON. + * @ops: monitoring operations set to register. + * + * This function registers a monitoring operations set of valid &struct + * damon_operations->id so that others can find and use them later. + * + * Return: 0 on success, negative error code otherwise. + */ +int damon_register_ops(struct damon_operations *ops) +{ + int err = 0; + + if (ops->id >= NR_DAMON_OPS) + return -EINVAL; + mutex_lock(&damon_ops_lock); + /* Fail for already registered ops */ + if (damon_registered_ops_id(ops->id)) { + err = -EINVAL; + goto out; + } + damon_registered_ops[ops->id] = *ops; +out: + mutex_unlock(&damon_ops_lock); + return err; +} + +/** + * damon_select_ops() - Select a monitoring operations to use with the context. + * @ctx: monitoring context to use the operations. + * @id: id of the registered monitoring operations to select. + * + * This function finds registered monitoring operations set of @id and make + * @ctx to use it. + * + * Return: 0 on success, negative error code otherwise. + */ +int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id) +{ + int err = 0; + + if (id >= NR_DAMON_OPS) + return -EINVAL; + + mutex_lock(&damon_ops_lock); + if (!damon_registered_ops_id(id)) + err = -EINVAL; + else + ctx->ops = damon_registered_ops[id]; + mutex_unlock(&damon_ops_lock); + return err; +} + /* * Construct a damon_region struct * -- cgit v1.2.3-71-gd317 From 851040566a008f7248cb754d5bb9a3e34f2effe5 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 22 Mar 2022 14:49:07 -0700 Subject: mm/damon/paddr,vaddr: remove damon_{p,v}a_{target_valid,set_operations}() Because the DAMON debugfs interface and DAMON-based proactive reclaim are now using monitoring operations via the registration mechanism, the damon_{p,v}a_{target_valid,set_operations}() functions have no users. This commit cleans them up.
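For reference, the register/select pattern that replaces these per-implementation setters can be modeled in user space as below. This is a minimal sketch mirroring damon_register_ops()/damon_select_ops() from the patch above; it is not kernel code, and the pthread mutex and all names here are illustrative stand-ins only.

/*
 * User-space model of the DAMON ops registration/selection pattern.
 * Build with: cc -o ops ops.c -lpthread
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

enum ops_id { OPS_VADDR, OPS_PADDR, NR_OPS };

struct operations {
	enum ops_id id;
	void (*init)(void);
};

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
static struct operations registered_ops[NR_OPS];

/* An all-zero slot means "nothing registered under this id". */
static int ops_id_registered(enum ops_id id)
{
	static const struct operations empty;

	return memcmp(&empty, &registered_ops[id], sizeof(empty)) != 0;
}

static int register_ops(struct operations *ops)
{
	int err = 0;

	if (ops->id >= NR_OPS)
		return -EINVAL;
	pthread_mutex_lock(&ops_lock);
	if (ops_id_registered(ops->id))
		err = -EINVAL;	/* reject double registration */
	else
		registered_ops[ops->id] = *ops;	/* copy, not reference */
	pthread_mutex_unlock(&ops_lock);
	return err;
}

static int select_ops(struct operations *ctx_ops, enum ops_id id)
{
	int err = 0;

	if (id >= NR_OPS)
		return -EINVAL;
	pthread_mutex_lock(&ops_lock);
	if (!ops_id_registered(id))
		err = -EINVAL;
	else
		*ctx_ops = registered_ops[id];	/* context gets its own copy */
	pthread_mutex_unlock(&ops_lock);
	return err;
}

static void vaddr_init(void) { puts("vaddr ops selected"); }

int main(void)
{
	struct operations vaddr = { .id = OPS_VADDR, .init = vaddr_init };
	struct operations ctx_ops;

	register_ops(&vaddr);
	if (!select_ops(&ctx_ops, OPS_VADDR))
		ctx_ops.init();
	return 0;
}

Note the design choice visible in the kernel code and mirrored here: selection copies the registered set into the context rather than storing a pointer to the registry, so a running context keeps a consistent operations set regardless of later registry state.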
Link: https://lkml.kernel.org/r/20220215184603.1479-9-sj@kernel.org Signed-off-by: SeongJae Park Cc: David Rientjes Cc: Xin Hao Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/damon.h | 10 ---------- mm/damon/paddr.c | 20 +------------------- mm/damon/vaddr.c | 15 +-------------- 3 files changed, 2 insertions(+), 43 deletions(-) (limited to 'include/linux') diff --git a/include/linux/damon.h b/include/linux/damon.h index 076da277b249..49c4a11ecf20 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -513,14 +513,4 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); #endif /* CONFIG_DAMON */ -#ifdef CONFIG_DAMON_VADDR -bool damon_va_target_valid(void *t); -void damon_va_set_operations(struct damon_ctx *ctx); -#endif /* CONFIG_DAMON_VADDR */ - -#ifdef CONFIG_DAMON_PADDR -bool damon_pa_target_valid(void *t); -void damon_pa_set_operations(struct damon_ctx *ctx); -#endif /* CONFIG_DAMON_PADDR */ - #endif /* _DAMON_H */ diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index d968bb38bd5d..7c263797a9a9 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -208,11 +208,6 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx) return max_nr_accesses; } -bool damon_pa_target_valid(void *t) -{ - return true; -} - static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos *scheme) @@ -261,19 +256,6 @@ static int damon_pa_scheme_score(struct damon_ctx *context, return DAMOS_MAX_SCORE; } -void damon_pa_set_operations(struct damon_ctx *ctx) -{ - ctx->ops.init = NULL; - ctx->ops.update = NULL; - ctx->ops.prepare_access_checks = damon_pa_prepare_access_checks; - ctx->ops.check_accesses = damon_pa_check_accesses; - ctx->ops.reset_aggregated = NULL; - ctx->ops.target_valid = damon_pa_target_valid; - ctx->ops.cleanup = NULL; - ctx->ops.apply_scheme = damon_pa_apply_scheme; - ctx->ops.get_scheme_score = damon_pa_scheme_score; -} - static int __init damon_pa_initcall(void) { struct damon_operations ops = { @@ -283,7 +265,7 @@ static int __init damon_pa_initcall(void) .prepare_access_checks = damon_pa_prepare_access_checks, .check_accesses = damon_pa_check_accesses, .reset_aggregated = NULL, - .target_valid = damon_pa_target_valid, + .target_valid = NULL, .cleanup = NULL, .apply_scheme = damon_pa_apply_scheme, .get_scheme_score = damon_pa_scheme_score, diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 87475ba37bec..b2ec0aa1ff45 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -653,7 +653,7 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx) * Functions for the target validity check and cleanup */ -bool damon_va_target_valid(void *target) +static bool damon_va_target_valid(void *target) { struct damon_target *t = target; struct task_struct *task; @@ -739,19 +739,6 @@ static int damon_va_scheme_score(struct damon_ctx *context, return DAMOS_MAX_SCORE; } -void damon_va_set_operations(struct damon_ctx *ctx) -{ - ctx->ops.init = damon_va_init; - ctx->ops.update = damon_va_update; - ctx->ops.prepare_access_checks = damon_va_prepare_access_checks; - ctx->ops.check_accesses = damon_va_check_accesses; - ctx->ops.reset_aggregated = NULL; - ctx->ops.target_valid = damon_va_target_valid; - ctx->ops.cleanup = NULL; - ctx->ops.apply_scheme = damon_va_apply_scheme; - ctx->ops.get_scheme_score = damon_va_scheme_score; -} - static int __init damon_va_initcall(void) { struct damon_operations ops = { -- cgit v1.2.3-71-gd317 From 8b9b0d335a345cbc590baa5aa8aa02c467c1e4e8 Mon 
Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 22 Mar 2022 14:49:21 -0700 Subject: mm/damon/core: allow non-exclusive DAMON start/stop MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

Patch series "Introduce DAMON sysfs interface", v3.

Introduction
============

DAMON's debugfs-based user interface (DAMON_DBGFS) has served very well so far. However, it unnecessarily depends on debugfs, while DAMON is not aimed at debugging only. Also, the interface receives multiple values via one file. For example, the schemes file receives 18 values. As a result, it is inefficient, hard to use, and difficult to extend. In particular, keeping backward compatibility for user space tools is becoming increasingly challenging. It would be better to implement another reliable and flexible interface and deprecate DAMON_DBGFS in the long term.

For this reason, this patchset introduces a new sysfs-based user interface for DAMON. The idea of the new interface is to use directory hierarchies and have one dedicated file for each value. For a short example, users can set up virtual address monitoring via the interface as below:

# cd /sys/kernel/mm/damon/admin/
# echo 1 > kdamonds/nr_kdamonds
# echo 1 > kdamonds/0/contexts/nr_contexts
# echo vaddr > kdamonds/0/contexts/0/operations
# echo 1 > kdamonds/0/contexts/0/targets/nr_targets
# echo $(pidof ) > kdamonds/0/contexts/0/targets/0/pid_target
# echo on > kdamonds/0/state

A brief representation of the file hierarchy of the DAMON sysfs interface is as below. Children are represented with indentation, directories have a '/' suffix, and files in each directory are separated by commas.

/sys/kernel/mm/damon/admin
│ kdamonds/nr_kdamonds
│ │ 0/state,pid
│ │ │ contexts/nr_contexts
│ │ │ │ 0/operations
│ │ │ │ │ monitoring_attrs/
│ │ │ │ │ │ intervals/sample_us,aggr_us,update_us
│ │ │ │ │ │ nr_regions/min,max
│ │ │ │ │ targets/nr_targets
│ │ │ │ │ │ 0/pid_target
│ │ │ │ │ │ │ regions/nr_regions
│ │ │ │ │ │ │ │ 0/start,end
│ │ │ │ │ │ │ │ ...
│ │ │ │ │ │ ...
│ │ │ │ │ schemes/nr_schemes
│ │ │ │ │ │ 0/action
│ │ │ │ │ │ │ access_pattern/
│ │ │ │ │ │ │ │ sz/min,max
│ │ │ │ │ │ │ │ nr_accesses/min,max
│ │ │ │ │ │ │ │ age/min,max
│ │ │ │ │ │ │ quotas/ms,bytes,reset_interval_ms
│ │ │ │ │ │ │ │ weights/sz_permil,nr_accesses_permil,age_permil
│ │ │ │ │ │ │ watermarks/metric,interval_us,high,mid,low
│ │ │ │ │ │ │ stats/nr_tried,sz_tried,nr_applied,sz_applied,qt_exceeds
│ │ │ │ │ │ ...
│ │ │ │ ...
│ │ ...

Detailed usage of the files will be described in the final Documentation patch of this patchset.

Main Difference Between DAMON_DBGFS and DAMON_SYSFS
---------------------------------------------------

At the moment, DAMON_DBGFS and DAMON_SYSFS provide the same features. One important difference between them is their exclusiveness. DAMON_DBGFS works in an exclusive manner, so that no other DAMON worker thread (kdamond) in the system can run concurrently and interfere with it. For that reason, DAMON_DBGFS asks users to construct all monitoring contexts and start them at once. It's not a big problem, but it makes the operation a little bit complex and inflexible.

For more flexible usage, DAMON_SYSFS moves the responsibility of preventing any possible interference to the admins and works in a non-exclusive manner. That is, users can configure and start contexts one by one. Note that DAMON respects both exclusive groups and non-exclusive groups of contexts, in a manner similar to that of reader-writer locks.
That is, if any exclusive monitoring contexts (e.g., contexts started via DAMON_DBGFS) are running, DAMON_SYSFS does not start new contexts, and vice versa.

Future Plan of DAMON_DBGFS Deprecation
======================================

Once this patchset is merged, DAMON_DBGFS development will be frozen. That is, we will maintain it to work as it does now, so that no users will be broken. But it will not be extended to provide any new DAMON features. The support will be continued only until the next LTS release. After that, we will drop DAMON_DBGFS.

User-space Tooling Compatibility
--------------------------------

As DAMON_SYSFS provides all features of DAMON_DBGFS, all user space tooling can move to DAMON_SYSFS. As we will continue supporting DAMON_DBGFS until the next LTS kernel release, user space tools would have enough time to move to DAMON_SYSFS. The official user space tool, damo[1], already supports both DAMON_SYSFS and DAMON_DBGFS. Both correctness tests[2] and performance tests[3] of DAMON using DAMON_SYSFS also passed.

[1] https://github.com/awslabs/damo
[2] https://github.com/awslabs/damon-tests/tree/master/corr
[3] https://github.com/awslabs/damon-tests/tree/master/perf

Sequence of Patches
===================

The first two patches (patches 1-2) make core changes for DAMON_SYSFS. The first one (patch 1) allows non-exclusive DAMON contexts so that DAMON_SYSFS can work in a non-exclusive mode, while the second one (patch 2) adds the number of values of each DAMON enum type so that DAMON API users can safely iterate over the enums. The third patch (patch 3) implements a basic sysfs stub for virtual address space monitoring. Note that this implements only the sysfs files; DAMON is not linked yet. The fourth patch (patch 4) links DAMON_SYSFS to DAMON so that users can control DAMON using the sysfs files. The following six patches (patches 5-10) implement the other DAMON features that DAMON_DBGFS supports, one by one (physical address space monitoring, DAMON-based operation schemes, schemes quotas, schemes prioritization weights, schemes watermarks, and schemes stats). The following patch (patch 11) adds a simple selftest for DAMON_SYSFS, and the final one (patch 12) documents DAMON_SYSFS.

This patch (of 13):

To avoid interference between DAMON contexts monitoring overlapping memory regions, damon_start() works in an exclusive manner. That is, damon_start() does nothing but fails if any context started by another instance of the function is still running. This makes its usage a little bit restrictive. However, admins could be aware of each DAMON usage and address such interference on their own in some cases.

This commit hence implements a non-exclusive mode of the function and allows the callers to select the mode. Note that the exclusive groups and non-exclusive groups of contexts will respect each other in a manner similar to that of reader-writer locks. Therefore, this commit will not cause any behavioral change to the exclusive groups.
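A minimal user-space sketch of this reader-writer-like admission logic is below. It mirrors the nr_running_ctxs/running_exclusive_ctxs checks added to damon_start() and kdamond_fn() in the diff that follows, but omits damon_lock and thread creation; the function names here are illustrative only.

/* Exclusive groups behave like writers, non-exclusive groups like readers. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static int start_group(int nr_ctxs, bool exclusive)
{
	/* A writer excludes everyone; readers exclude only writers. */
	if ((exclusive && nr_running_ctxs) ||
	    (!exclusive && running_exclusive_ctxs))
		return -EBUSY;

	nr_running_ctxs += nr_ctxs;
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	return 0;
}

static void ctx_exits(void)
{
	/* The last exiting context drops the exclusive marker. */
	if (!--nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
}

int main(void)
{
	printf("%d\n", start_group(1, true));	/* 0: writer admitted */
	printf("%d\n", start_group(1, false));	/* -EBUSY: writer running */
	ctx_exits();
	printf("%d\n", start_group(2, false));	/* 0: readers admitted */
	printf("%d\n", start_group(1, false));	/* 0: readers share */
	return 0;
}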
Link: https://lkml.kernel.org/r/20220228081314.5770-1-sj@kernel.org Link: https://lkml.kernel.org/r/20220228081314.5770-2-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Cc: Shuah Khan Cc: David Rientjes Cc: Xin Hao Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/damon.h | 2 +- mm/damon/core.c | 23 +++++++++++++++-------- mm/damon/dbgfs.c | 2 +- mm/damon/reclaim.c | 2 +- 4 files changed, 18 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/damon.h b/include/linux/damon.h index 49c4a11ecf20..f8e99e47d747 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -508,7 +508,7 @@ int damon_nr_running_ctxs(void); int damon_register_ops(struct damon_operations *ops); int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id); -int damon_start(struct damon_ctx **ctxs, int nr_ctxs); +int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive); int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); #endif /* CONFIG_DAMON */ diff --git a/mm/damon/core.c b/mm/damon/core.c index 82e0a4620c4f..c1e0fed4e877 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -24,6 +24,7 @@ static DEFINE_MUTEX(damon_lock); static int nr_running_ctxs; +static bool running_exclusive_ctxs; static DEFINE_MUTEX(damon_ops_lock); static struct damon_operations damon_registered_ops[NR_DAMON_OPS]; @@ -434,22 +435,25 @@ static int __damon_start(struct damon_ctx *ctx) * damon_start() - Starts the monitorings for a given group of contexts. * @ctxs: an array of the pointers for contexts to start monitoring * @nr_ctxs: size of @ctxs + * @exclusive: exclusiveness of this contexts group * * This function starts a group of monitoring threads for a group of monitoring * contexts. One thread per each context is created and run in parallel. The - * caller should handle synchronization between the threads by itself. If a - * group of threads that created by other 'damon_start()' call is currently - * running, this function does nothing but returns -EBUSY. + * caller should handle synchronization between the threads by itself. If + * @exclusive is true and a group of threads that created by other + * 'damon_start()' call is currently running, this function does nothing but + * returns -EBUSY. * * Return: 0 on success, negative error code otherwise. */ -int damon_start(struct damon_ctx **ctxs, int nr_ctxs) +int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive) { int i; int err = 0; mutex_lock(&damon_lock); - if (nr_running_ctxs) { + if ((exclusive && nr_running_ctxs) || + (!exclusive && running_exclusive_ctxs)) { mutex_unlock(&damon_lock); return -EBUSY; } @@ -460,13 +464,15 @@ int damon_start(struct damon_ctx **ctxs, int nr_ctxs) break; nr_running_ctxs++; } + if (exclusive && nr_running_ctxs) + running_exclusive_ctxs = true; mutex_unlock(&damon_lock); return err; } /* - * __damon_stop() - Stops monitoring of given context. + * __damon_stop() - Stops monitoring of a given context. * @ctx: monitoring context * * Return: 0 on success, negative error code otherwise. 
@@ -504,9 +510,8 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) /* nr_running_ctxs is decremented in kdamond_fn */ err = __damon_stop(ctxs[i]); if (err) - return err; + break; } - return err; } @@ -1102,6 +1107,8 @@ static int kdamond_fn(void *data) mutex_lock(&damon_lock); nr_running_ctxs--; + if (!nr_running_ctxs && running_exclusive_ctxs) + running_exclusive_ctxs = false; mutex_unlock(&damon_lock); return 0; diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index 05b574cbcea8..a0dab8b5e45f 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -967,7 +967,7 @@ static ssize_t dbgfs_monitor_on_write(struct file *file, return -EINVAL; } } - ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs); + ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true); } else if (!strncmp(kbuf, "off", count)) { ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs); } else { diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c index b53d9c22fad1..e34c4d0c4d93 100644 --- a/mm/damon/reclaim.c +++ b/mm/damon/reclaim.c @@ -330,7 +330,7 @@ static int damon_reclaim_turn(bool on) if (err) goto free_scheme_out; - err = damon_start(&ctx, 1); + err = damon_start(&ctx, 1, true); if (!err) { kdamond_pid = ctx->kdamond->pid; return 0; -- cgit v1.2.3-71-gd317 From 5257f36ec289d544532f2889cbed11abbb06cf0c Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 22 Mar 2022 14:49:24 -0700 Subject: mm/damon/core: add number of each enum type values This commit declares the number of legal values for each DAMON enum types to make traversals of such DAMON enum types easy and safe. Link: https://lkml.kernel.org/r/20220228081314.5770-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: David Rientjes Cc: Greg Kroah-Hartman Cc: Jonathan Corbet Cc: Shuah Khan Cc: Xin Hao Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/damon.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/damon.h b/include/linux/damon.h index f8e99e47d747..f23cbfa4248d 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -87,6 +87,7 @@ struct damon_target { * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE. * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE. * @DAMOS_STAT: Do nothing but count the stat. + * @NR_DAMOS_ACTIONS: Total number of DAMOS actions */ enum damos_action { DAMOS_WILLNEED, @@ -95,6 +96,7 @@ enum damos_action { DAMOS_HUGEPAGE, DAMOS_NOHUGEPAGE, DAMOS_STAT, /* Do nothing but only record the stat */ + NR_DAMOS_ACTIONS, }; /** @@ -157,10 +159,12 @@ struct damos_quota { * * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme. * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000]. + * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics */ enum damos_wmark_metric { DAMOS_WMARK_NONE, DAMOS_WMARK_FREE_MEM_RATE, + NR_DAMOS_WMARK_METRICS, }; /** -- cgit v1.2.3-71-gd317 From 2af7e566a8616c278e1d7287ce86cd3900bed943 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 22 Mar 2022 10:22:24 -0700 Subject: net/mlx5e: Fix build warning, detected write beyond size of field When merged with Linus tree, the cited patch below will cause the following build warning: In function 'fortify_memset_chk', inlined from 'mlx5e_xmit_xdp_frame' at drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c:438:3: include/linux/fortify-string.h:242:25: error: call to '__write_overflow_field' declared with attribute warning: detected write beyond size of field (1st parameter); maybe use struct_group()? 
[-Werror=attribute-warning] 242 | __write_overflow_field(p_size_field, size); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Fix that by grouping the fields to memset in struct_group() to avoid the false alarm. Fixes: 9ded70fa1d81 ("net/mlx5e: Don't prefill WQEs in XDP SQ in the multi buffer mode") Reported-by: Stephen Rothwell Suggested-by: Stephen Rothwell Signed-off-by: Saeed Mahameed Link: https://lore.kernel.org/r/20220322172224.31849-1-saeed@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 3 +-- include/linux/mlx5/qp.h | 5 +++++ 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index f35b62ce4c07..8f321a6c0809 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -435,8 +435,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, u8 num_pkts = 1 + num_frags; int i; - memset(&cseg->signature, 0, sizeof(*cseg) - - sizeof(cseg->opmod_idx_opcode) - sizeof(cseg->qpn_ds)); + memset(&cseg->trailer, 0, sizeof(cseg->trailer)); memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer)); eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 61e48d459b23..8bda3ba5b109 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -202,6 +202,9 @@ struct mlx5_wqe_fmr_seg { struct mlx5_wqe_ctrl_seg { __be32 opmod_idx_opcode; __be32 qpn_ds; + + struct_group(trailer, + u8 signature; u8 rsvd[2]; u8 fm_ce_se; @@ -211,6 +214,8 @@ struct mlx5_wqe_ctrl_seg { __be32 umr_mkey; __be32 tis_tir_num; }; + + ); /* end of trailer group */ }; #define MLX5_WQE_CTRL_DS_MASK 0x3f -- cgit v1.2.3-71-gd317 From d578c770c85233af592e54537f93f3831bde7e9a Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Wed, 23 Mar 2022 09:13:08 +0800 Subject: block: avoid calling blkg_free() in atomic context blkg_free() can currently be called in atomic context, either with a spin lock held or from an RCU callback. Meanwhile, either the request queue's release handler or ->pd_free_fn can sleep. Fix the issue by scheduling a work function for freeing the blkcg_gq instance. [ 148.553894] BUG: sleeping function called from invalid context at block/blk-sysfs.c:767 [ 148.557381] in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 0, name: swapper/13 [ 148.560741] preempt_count: 101, expected: 0 [ 148.562577] RCU nest depth: 0, expected: 0 [ 148.564379] 1 lock held by swapper/13/0: [ 148.566127] #0: ffffffff82615f80 (rcu_callback){....}-{0:0}, at: rcu_lock_acquire+0x0/0x1b [ 148.569640] Preemption disabled at: [ 148.569642] [] ___slab_alloc+0x554/0x661 [ 148.573559] CPU: 13 PID: 0 Comm: swapper/13 Kdump: loaded Not tainted 5.17.0_up+ #110 [ 148.576834] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.14.0-1.fc33 04/01/2014 [ 148.579768] Call Trace: [ 148.580567] [ 148.581262] dump_stack_lvl+0x56/0x7c [ 148.582367] ?
___slab_alloc+0x554/0x661 [ 148.583526] __might_resched+0x1af/0x1c8 [ 148.584678] blk_release_queue+0x24/0x109 [ 148.585861] kobject_cleanup+0xc9/0xfe [ 148.586979] blkg_free+0x46/0x63 [ 148.587962] rcu_do_batch+0x1c5/0x3db [ 148.589057] rcu_core+0x14a/0x184 [ 148.590065] __do_softirq+0x14d/0x2c7 [ 148.591167] __irq_exit_rcu+0x7a/0xd4 [ 148.592264] sysvec_apic_timer_interrupt+0x82/0xa5 [ 148.593649] [ 148.594354] [ 148.595058] asm_sysvec_apic_timer_interrupt+0x12/0x20 Cc: Tejun Heo Fixes: 0a9a25ca7843 ("block: let blkcg_gq grab request queue's refcnt") Reported-by: Christoph Hellwig Link: https://lore.kernel.org/linux-block/20220322093322.GA27283@lst.de/ Signed-off-by: Ming Lei Link: https://lore.kernel.org/r/20220323011308.2010380-1-ming.lei@redhat.com Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 32 ++++++++++++++++++++++---------- include/linux/blk-cgroup.h | 5 ++++- 2 files changed, 26 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index d53b0d69dd73..6ed43fc0e6ab 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q, return pol && test_bit(pol->plid, q->blkcg_pols); } -/** - * blkg_free - free a blkg - * @blkg: blkg to free - * - * Free @blkg which may be partially allocated. - */ -static void blkg_free(struct blkcg_gq *blkg) +static void blkg_free_workfn(struct work_struct *work) { + struct blkcg_gq *blkg = container_of(work, struct blkcg_gq, + free_work); int i; - if (!blkg) - return; - for (i = 0; i < BLKCG_MAX_POLS; i++) if (blkg->pd[i]) blkcg_policy[i]->pd_free_fn(blkg->pd[i]); @@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg) kfree(blkg); } +/** + * blkg_free - free a blkg + * @blkg: blkg to free + * + * Free @blkg which may be partially allocated. + */ +static void blkg_free(struct blkcg_gq *blkg) +{ + if (!blkg) + return; + + /* + * Both ->pd_free_fn() and request queue's release handler may + * sleep, so free us by scheduling one work func + */ + INIT_WORK(&blkg->free_work, blkg_free_workfn); + schedule_work(&blkg->free_work); +} + static void __blkg_release(struct rcu_head *rcu) { struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index f2ad8ed8f777..652cd05b0924 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -95,7 +95,10 @@ struct blkcg_gq { spinlock_t async_bio_lock; struct bio_list async_bios; - struct work_struct async_bio_work; + union { + struct work_struct async_bio_work; + struct work_struct free_work; + }; atomic_t use_delay; atomic64_t delay_nsec; -- cgit v1.2.3-71-gd317 From 553f685ebf9689e701ec6b95c0ca3595cf1a743f Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 11 Mar 2022 20:55:18 +0800 Subject: mfd: db8500-prcmu: Remove unused inline function commit b0e846248de5 ("mfd: db8500-prcmu: Remove dead code for a non-existing config") left behind this, remove it. 
Signed-off-by: YueHaibing Reviewed-by: Linus Walleij Signed-off-by: Lee Jones Link: https://lore.kernel.org/r/20220311125518.31064-1-yuehaibing@huawei.com --- include/linux/mfd/dbx500-prcmu.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h index 2a255362b5ac..e7a7e70fdb38 100644 --- a/include/linux/mfd/dbx500-prcmu.h +++ b/include/linux/mfd/dbx500-prcmu.h @@ -561,10 +561,6 @@ static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void) return 0; } -static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {} - -static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {} - static inline int prcmu_qos_requirement(int prcmu_qos_class) { return 0; -- cgit v1.2.3-71-gd317 From 30d024b5058e0433914022f87d917a97a9527632 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 23 Mar 2022 15:35:10 +1200 Subject: cacheflush.h: Add forward declaration for struct folio The struct folio is not declared in cacheflush.h so we need to provide a forward declaration as otherwise users of this header file may get warnings. Reported-by: Guenter Roeck Fixes: 522a0032af00 ("Add linux/cacheflush.h") Signed-off-by: Herbert Xu Reviewed-by: Matthew Wilcox (Oracle) Signed-off-by: Linus Torvalds --- include/linux/cacheflush.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h index fef8b607f97e..a6189d21f2ba 100644 --- a/include/linux/cacheflush.h +++ b/include/linux/cacheflush.h @@ -4,6 +4,8 @@ #include +struct folio; + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO void flush_dcache_folio(struct folio *folio); -- cgit v1.2.3-71-gd317 From d91612d7f01aca454469976d25db761c5085ae4d Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 2 Feb 2022 20:17:35 -0600 Subject: clk: sunxi-ng: Add support for the sun6i RTC clocks The RTC power domain in sun6i and newer SoCs manages the 16 MHz RC oscillator (called "IOSC" or "osc16M") and the optional 32 kHz crystal oscillator (called "LOSC" or "osc32k"). Starting with the H6, this power domain also handles the 24 MHz DCXO (called variously "HOSC", "dcxo24M", or "osc24M") as well. The H6 also adds a calibration circuit for IOSC. Later SoCs introduce further variations on the design: - H616 adds an additional mux for the 32 kHz fanout source. - R329 adds an additional mux for the RTC timekeeping clock, a clock for the SPI bus between power domains inside the RTC, and removes the IOSC calibration functionality. Take advantage of the CCU framework to handle this increased complexity. This driver is intended to be a drop-in replacement for the existing RTC clock provider. So some runtime adjustment of the clock parents is needed, both to handle hardware differences, and to support the old binding which omitted some of the input clocks. 
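The probe-time parent adjustment mentioned above can be sketched as below. This is an illustrative user-space model of the general pattern (trimming a mux's parent list based on per-SoC match data); the match-data field names follow the driver diff that follows, but the logic and the example SoC flags are assumptions for illustration, not the driver's actual code.

/* Model of per-SoC parent-list fixups done once at probe time. */
#include <stdbool.h>
#include <stdio.h>

struct match_data {
	bool have_ext_osc32k;
	bool rtc_32k_single_parent;
};

struct mux {
	const char *parents[2];
	unsigned int num_parents;
};

static void probe_fixups(struct mux *osc32k, struct mux *rtc_32k,
			 const struct match_data *d)
{
	/* Old bindings may omit the external crystal: drop that parent. */
	if (!d->have_ext_osc32k)
		osc32k->num_parents = 1;
	/* Some SoCs hardwire rtc-32k to a single input. */
	if (d->rtc_32k_single_parent)
		rtc_32k->num_parents = 1;
}

int main(void)
{
	struct mux osc32k = { { "iosc-32k", "ext-osc32k-gate" }, 2 };
	struct mux rtc_32k = { { "osc32k", "osc24M-32k" }, 2 };
	/* Hypothetical SoC configuration, for demonstration only. */
	const struct match_data example_soc = {
		.have_ext_osc32k = false,
		.rtc_32k_single_parent = true,
	};

	probe_fixups(&osc32k, &rtc_32k, &example_soc);
	printf("osc32k parents: %u, rtc-32k parents: %u\n",
	       osc32k.num_parents, rtc_32k.num_parents);
	return 0;
}

This keeps one static clock tree in the driver while letting each compatible string select which inputs actually exist, which is how the driver can remain a drop-in replacement for the old RTC clock provider.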
Signed-off-by: Samuel Holland Acked-by: Stephen Boyd Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20220203021736.13434-6-samuel@sholland.org --- drivers/clk/sunxi-ng/Kconfig | 5 + drivers/clk/sunxi-ng/Makefile | 2 + drivers/clk/sunxi-ng/ccu-sun6i-rtc.c | 378 +++++++++++++++++++++++++++++++++++ drivers/clk/sunxi-ng/ccu-sun6i-rtc.h | 15 ++ drivers/rtc/rtc-sun6i.c | 7 + include/linux/clk/sunxi-ng.h | 2 + 6 files changed, 409 insertions(+) create mode 100644 drivers/clk/sunxi-ng/ccu-sun6i-rtc.c create mode 100644 drivers/clk/sunxi-ng/ccu-sun6i-rtc.h (limited to 'include/linux') diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index 68a94e5af8ed..461537679c04 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig @@ -69,6 +69,11 @@ config SUN6I_A31_CCU default MACH_SUN6I depends on MACH_SUN6I || COMPILE_TEST +config SUN6I_RTC_CCU + tristate "Support for the Allwinner H616/R329 RTC CCU" + default ARCH_SUNXI + depends on ARCH_SUNXI || COMPILE_TEST + config SUN8I_A23_CCU tristate "Support for the Allwinner A23 CCU" default MACH_SUN8I diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile index ec931cb7aa14..6b3ae2b620db 100644 --- a/drivers/clk/sunxi-ng/Makefile +++ b/drivers/clk/sunxi-ng/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o obj-$(CONFIG_SUN4I_A10_CCU) += sun4i-a10-ccu.o obj-$(CONFIG_SUN5I_CCU) += sun5i-ccu.o obj-$(CONFIG_SUN6I_A31_CCU) += sun6i-a31-ccu.o +obj-$(CONFIG_SUN6I_RTC_CCU) += sun6i-rtc-ccu.o obj-$(CONFIG_SUN8I_A23_CCU) += sun8i-a23-ccu.o obj-$(CONFIG_SUN8I_A33_CCU) += sun8i-a33-ccu.o obj-$(CONFIG_SUN8I_A83T_CCU) += sun8i-a83t-ccu.o @@ -60,6 +61,7 @@ sun50i-h616-ccu-y += ccu-sun50i-h616.o sun4i-a10-ccu-y += ccu-sun4i-a10.o sun5i-ccu-y += ccu-sun5i.o sun6i-a31-ccu-y += ccu-sun6i-a31.o +sun6i-rtc-ccu-y += ccu-sun6i-rtc.o sun8i-a23-ccu-y += ccu-sun8i-a23.o sun8i-a33-ccu-y += ccu-sun8i-a33.o sun8i-a83t-ccu-y += ccu-sun8i-a83t.o diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c new file mode 100644 index 000000000000..a39670a7c446 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2021 Samuel Holland +// + +#include +#include +#include +#include +#include + +#include "ccu_common.h" + +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_mux.h" + +#include "ccu-sun6i-rtc.h" + +#define IOSC_ACCURACY 300000000 /* 30% */ +#define IOSC_RATE 16000000 + +#define LOSC_RATE 32768 +#define LOSC_RATE_SHIFT 15 + +#define LOSC_CTRL_REG 0x0 +#define LOSC_CTRL_KEY 0x16aa0000 + +#define IOSC_32K_CLK_DIV_REG 0x8 +#define IOSC_32K_CLK_DIV GENMASK(4, 0) +#define IOSC_32K_PRE_DIV 32 + +#define IOSC_CLK_CALI_REG 0xc +#define IOSC_CLK_CALI_DIV_ONES 22 +#define IOSC_CLK_CALI_EN BIT(1) +#define IOSC_CLK_CALI_SRC_SEL BIT(0) + +#define LOSC_OUT_GATING_REG 0x60 + +#define DCXO_CTRL_REG 0x160 +#define DCXO_CTRL_CLK16M_RC_EN BIT(0) + +struct sun6i_rtc_match_data { + bool have_ext_osc32k : 1; + bool have_iosc_calibration : 1; + bool rtc_32k_single_parent : 1; + const struct clk_parent_data *osc32k_fanout_parents; + u8 osc32k_fanout_nparents; +}; + +static bool have_iosc_calibration; + +static int ccu_iosc_enable(struct clk_hw *hw) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + + return ccu_gate_helper_enable(cm, DCXO_CTRL_CLK16M_RC_EN); +} + +static void ccu_iosc_disable(struct clk_hw *hw) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + + return 
ccu_gate_helper_disable(cm, DCXO_CTRL_CLK16M_RC_EN); +} + +static int ccu_iosc_is_enabled(struct clk_hw *hw) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + + return ccu_gate_helper_is_enabled(cm, DCXO_CTRL_CLK16M_RC_EN); +} + +static unsigned long ccu_iosc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + + if (have_iosc_calibration) { + u32 reg = readl(cm->base + IOSC_CLK_CALI_REG); + + /* + * Recover the IOSC frequency by shifting the ones place of + * (fixed-point divider * 32768) into bit zero. + */ + if (reg & IOSC_CLK_CALI_EN) + return reg >> (IOSC_CLK_CALI_DIV_ONES - LOSC_RATE_SHIFT); + } + + return IOSC_RATE; +} + +static unsigned long ccu_iosc_recalc_accuracy(struct clk_hw *hw, + unsigned long parent_accuracy) +{ + return IOSC_ACCURACY; +} + +static const struct clk_ops ccu_iosc_ops = { + .enable = ccu_iosc_enable, + .disable = ccu_iosc_disable, + .is_enabled = ccu_iosc_is_enabled, + .recalc_rate = ccu_iosc_recalc_rate, + .recalc_accuracy = ccu_iosc_recalc_accuracy, +}; + +static struct ccu_common iosc_clk = { + .reg = DCXO_CTRL_REG, + .hw.init = CLK_HW_INIT_NO_PARENT("iosc", &ccu_iosc_ops, + CLK_GET_RATE_NOCACHE), +}; + +static int ccu_iosc_32k_prepare(struct clk_hw *hw) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + u32 val; + + if (!have_iosc_calibration) + return 0; + + val = readl(cm->base + IOSC_CLK_CALI_REG); + writel(val | IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL, + cm->base + IOSC_CLK_CALI_REG); + + return 0; +} + +static void ccu_iosc_32k_unprepare(struct clk_hw *hw) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + u32 val; + + if (!have_iosc_calibration) + return; + + val = readl(cm->base + IOSC_CLK_CALI_REG); + writel(val & ~(IOSC_CLK_CALI_EN | IOSC_CLK_CALI_SRC_SEL), + cm->base + IOSC_CLK_CALI_REG); +} + +static unsigned long ccu_iosc_32k_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + u32 val; + + if (have_iosc_calibration) { + val = readl(cm->base + IOSC_CLK_CALI_REG); + + /* Assume the calibrated 32k clock is accurate. */ + if (val & IOSC_CLK_CALI_SRC_SEL) + return LOSC_RATE; + } + + val = readl(cm->base + IOSC_32K_CLK_DIV_REG) & IOSC_32K_CLK_DIV; + + return parent_rate / IOSC_32K_PRE_DIV / (val + 1); +} + +static unsigned long ccu_iosc_32k_recalc_accuracy(struct clk_hw *hw, + unsigned long parent_accuracy) +{ + struct ccu_common *cm = hw_to_ccu_common(hw); + u32 val; + + if (have_iosc_calibration) { + val = readl(cm->base + IOSC_CLK_CALI_REG); + + /* Assume the calibrated 32k clock is accurate. 
*/ + if (val & IOSC_CLK_CALI_SRC_SEL) + return 0; + } + + return parent_accuracy; +} + +static const struct clk_ops ccu_iosc_32k_ops = { + .prepare = ccu_iosc_32k_prepare, + .unprepare = ccu_iosc_32k_unprepare, + .recalc_rate = ccu_iosc_32k_recalc_rate, + .recalc_accuracy = ccu_iosc_32k_recalc_accuracy, +}; + +static struct ccu_common iosc_32k_clk = { + .hw.init = CLK_HW_INIT_HW("iosc-32k", &iosc_clk.hw, + &ccu_iosc_32k_ops, + CLK_GET_RATE_NOCACHE), +}; + +static const struct clk_hw *ext_osc32k[] = { NULL }; /* updated during probe */ + +static SUNXI_CCU_GATE_HWS(ext_osc32k_gate_clk, "ext-osc32k-gate", + ext_osc32k, 0x0, BIT(4), 0); + +static const struct clk_hw *osc32k_parents[] = { + &iosc_32k_clk.hw, + &ext_osc32k_gate_clk.common.hw +}; + +static struct clk_init_data osc32k_init_data = { + .name = "osc32k", + .ops = &ccu_mux_ops, + .parent_hws = osc32k_parents, + .num_parents = ARRAY_SIZE(osc32k_parents), /* updated during probe */ +}; + +static struct ccu_mux osc32k_clk = { + .mux = _SUNXI_CCU_MUX(0, 1), + .common = { + .reg = LOSC_CTRL_REG, + .features = CCU_FEATURE_KEY_FIELD, + .hw.init = &osc32k_init_data, + }, +}; + +/* This falls back to the global name for fwnodes without a named reference. */ +static const struct clk_parent_data osc24M[] = { + { .fw_name = "hosc", .name = "osc24M" } +}; + +static struct ccu_gate osc24M_32k_clk = { + .enable = BIT(16), + .common = { + .reg = LOSC_OUT_GATING_REG, + .prediv = 750, + .features = CCU_FEATURE_ALL_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS_DATA("osc24M-32k", osc24M, + &ccu_gate_ops, 0), + }, +}; + +static const struct clk_hw *rtc_32k_parents[] = { + &osc32k_clk.common.hw, + &osc24M_32k_clk.common.hw +}; + +static struct clk_init_data rtc_32k_init_data = { + .name = "rtc-32k", + .ops = &ccu_mux_ops, + .parent_hws = rtc_32k_parents, + .num_parents = ARRAY_SIZE(rtc_32k_parents), /* updated during probe */ +}; + +static struct ccu_mux rtc_32k_clk = { + .mux = _SUNXI_CCU_MUX(1, 1), + .common = { + .reg = LOSC_CTRL_REG, + .features = CCU_FEATURE_KEY_FIELD, + .hw.init = &rtc_32k_init_data, + }, +}; + +static struct clk_init_data osc32k_fanout_init_data = { + .name = "osc32k-fanout", + .ops = &ccu_mux_ops, + /* parents are set during probe */ +}; + +static struct ccu_mux osc32k_fanout_clk = { + .enable = BIT(0), + .mux = _SUNXI_CCU_MUX(1, 2), + .common = { + .reg = LOSC_OUT_GATING_REG, + .hw.init = &osc32k_fanout_init_data, + }, +}; + +static struct ccu_common *sun6i_rtc_ccu_clks[] = { + &iosc_clk, + &iosc_32k_clk, + &ext_osc32k_gate_clk.common, + &osc32k_clk.common, + &osc24M_32k_clk.common, + &rtc_32k_clk.common, + &osc32k_fanout_clk.common, +}; + +static struct clk_hw_onecell_data sun6i_rtc_ccu_hw_clks = { + .num = CLK_NUMBER, + .hws = { + [CLK_OSC32K] = &osc32k_clk.common.hw, + [CLK_OSC32K_FANOUT] = &osc32k_fanout_clk.common.hw, + [CLK_IOSC] = &iosc_clk.hw, + [CLK_IOSC_32K] = &iosc_32k_clk.hw, + [CLK_EXT_OSC32K_GATE] = &ext_osc32k_gate_clk.common.hw, + [CLK_OSC24M_32K] = &osc24M_32k_clk.common.hw, + [CLK_RTC_32K] = &rtc_32k_clk.common.hw, + }, +}; + +static const struct sunxi_ccu_desc sun6i_rtc_ccu_desc = { + .ccu_clks = sun6i_rtc_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun6i_rtc_ccu_clks), + + .hw_clks = &sun6i_rtc_ccu_hw_clks, +}; + +static const struct clk_parent_data sun50i_h616_osc32k_fanout_parents[] = { + { .hw = &osc32k_clk.common.hw }, + { .fw_name = "pll-32k" }, + { .hw = &osc24M_32k_clk.common.hw } +}; + +static const struct clk_parent_data sun50i_r329_osc32k_fanout_parents[] = { + { .hw = &osc32k_clk.common.hw }, + { .hw = 
&ext_osc32k_gate_clk.common.hw }, + { .hw = &osc24M_32k_clk.common.hw } +}; + +static const struct sun6i_rtc_match_data sun50i_h616_rtc_ccu_data = { + .have_iosc_calibration = true, + .rtc_32k_single_parent = true, + .osc32k_fanout_parents = sun50i_h616_osc32k_fanout_parents, + .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_h616_osc32k_fanout_parents), +}; + +static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = { + .have_ext_osc32k = true, + .osc32k_fanout_parents = sun50i_r329_osc32k_fanout_parents, + .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents), +}; + +static const struct of_device_id sun6i_rtc_ccu_match[] = { + { + .compatible = "allwinner,sun50i-h616-rtc", + .data = &sun50i_h616_rtc_ccu_data, + }, + { + .compatible = "allwinner,sun50i-r329-rtc", + .data = &sun50i_r329_rtc_ccu_data, + }, +}; + +int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg) +{ + const struct sun6i_rtc_match_data *data; + struct clk *ext_osc32k_clk = NULL; + const struct of_device_id *match; + + /* This driver is only used for newer variants of the hardware. */ + match = of_match_device(sun6i_rtc_ccu_match, dev); + if (!match) + return 0; + + data = match->data; + have_iosc_calibration = data->have_iosc_calibration; + + if (data->have_ext_osc32k) { + const char *fw_name; + + /* ext-osc32k was the only input clock in the old binding. */ + fw_name = of_property_read_bool(dev->of_node, "clock-names") + ? "ext-osc32k" : NULL; + ext_osc32k_clk = devm_clk_get_optional(dev, fw_name); + if (IS_ERR(ext_osc32k_clk)) + return PTR_ERR(ext_osc32k_clk); + } + + if (ext_osc32k_clk) { + /* Link ext-osc32k-gate to its parent. */ + *ext_osc32k = __clk_get_hw(ext_osc32k_clk); + } else { + /* ext-osc32k-gate is an orphan, so do not register it. */ + sun6i_rtc_ccu_hw_clks.hws[CLK_EXT_OSC32K_GATE] = NULL; + osc32k_init_data.num_parents = 1; + } + + if (data->rtc_32k_single_parent) + rtc_32k_init_data.num_parents = 1; + + osc32k_fanout_init_data.parent_data = data->osc32k_fanout_parents; + osc32k_fanout_init_data.num_parents = data->osc32k_fanout_nparents; + + return devm_sunxi_ccu_probe(dev, reg, &sun6i_rtc_ccu_desc); +} + +MODULE_IMPORT_NS(SUNXI_CCU); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h new file mode 100644 index 000000000000..9ae821fc2599 --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _CCU_SUN6I_RTC_H +#define _CCU_SUN6I_RTC_H + +#include + +#define CLK_IOSC_32K 3 +#define CLK_EXT_OSC32K_GATE 4 +#define CLK_OSC24M_32K 5 +#define CLK_RTC_32K 6 + +#define CLK_NUMBER (CLK_RTC_32K + 1) + +#endif /* _CCU_SUN6I_RTC_H */ diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 166866b67974..5252ce4cbda4 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -744,6 +745,12 @@ static int sun6i_rtc_probe(struct platform_device *pdev) chip->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chip->base)) return PTR_ERR(chip->base); + + if (IS_REACHABLE(CONFIG_SUN6I_RTC_CCU)) { + ret = sun6i_rtc_ccu_probe(dev, chip->base); + if (ret) + return ret; + } } platform_set_drvdata(pdev, chip); diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h index cf32123b39f5..57c8ec44ab4e 100644 --- a/include/linux/clk/sunxi-ng.h +++ b/include/linux/clk/sunxi-ng.h @@ -9,4 +9,6 @@ int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool 
new_mode); int sunxi_ccu_get_mmc_timing_mode(struct clk *clk); +int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg); + #endif -- cgit v1.2.3-71-gd317 From 5c0a04a663019dd14cb000d370cff0ced6df7ef1 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 9 Mar 2022 17:22:33 +0100 Subject: rtc: ds1685: drop no_irq No platforms are currently setting no_irq. Anyway, letting platform_get_irq fail is fine as this means that there is no IRQ. In that case, clear RTC_FEATURE_ALARM so the core knows there are no alarms. Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20220309162301.61679-2-alexandre.belloni@bootlin.com --- drivers/rtc/rtc-ds1685.c | 14 +++++--------- include/linux/rtc/ds1685.h | 1 - 2 files changed, 5 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c index 0ec1e44e3431..a24331ba8a5f 100644 --- a/drivers/rtc/rtc-ds1685.c +++ b/drivers/rtc/rtc-ds1685.c @@ -1285,13 +1285,10 @@ ds1685_rtc_probe(struct platform_device *pdev) * there won't be an automatic way of notifying the kernel about it, * unless ctrlc is explicitly polled. */ - if (!pdata->no_irq) { - ret = platform_get_irq(pdev, 0); - if (ret <= 0) - return ret; - - rtc->irq_num = ret; - + rtc->irq_num = platform_get_irq(pdev, 0); + if (rtc->irq_num <= 0) { + clear_bit(RTC_FEATURE_ALARM, rtc_dev->features); + } else { /* Request an IRQ. */ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq_num, NULL, ds1685_rtc_irq_handler, @@ -1305,7 +1302,6 @@ ds1685_rtc_probe(struct platform_device *pdev) rtc->irq_num = 0; } } - rtc->no_irq = pdata->no_irq; /* Setup complete. */ ds1685_rtc_switch_to_bank0(rtc); @@ -1394,7 +1390,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev) * have been taken care of by the shutdown scripts and this * is the final function call. */ - if (!rtc->no_irq) + if (rtc->irq_num) disable_irq_nosync(rtc->irq_num); /* Oscillator must be on and the countdown chain enabled. */ diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h index 67ee9d20cc5a..5a41c3bbcbe3 100644 --- a/include/linux/rtc/ds1685.h +++ b/include/linux/rtc/ds1685.h @@ -46,7 +46,6 @@ struct ds1685_priv { u32 regstep; int irq_num; bool bcd_mode; - bool no_irq; u8 (*read)(struct ds1685_priv *, int); void (*write)(struct ds1685_priv *, int, u8); void (*prepare_poweroff)(void); -- cgit v1.2.3-71-gd317 From 1a31d63632553a54af6c0c3c5b5930e931a94ee4 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 9 Mar 2022 17:23:00 +0100 Subject: rtc: remove uie_unsupported uie_unsupported is not used by any drivers anymore, remove it. 
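For context, a brief sketch of what a driver does instead of setting the removed flag: it clears the corresponding feature bit before registering the device (the driver name is hypothetical; the feature bit and helpers are the ones visible in the diff below):

static int example_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	/* This hardware cannot raise update interrupts (UIE). */
	clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);

	return devm_rtc_register_device(rtc);
}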
Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20220309162301.61679-29-alexandre.belloni@bootlin.com --- drivers/rtc/class.c | 3 --- include/linux/rtc.h | 2 -- 2 files changed, 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 40d504dac1a9..3c8eec2218df 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -399,9 +399,6 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc) if (!rtc->ops->set_alarm) clear_bit(RTC_FEATURE_ALARM, rtc->features); - if (rtc->uie_unsupported) - clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features); - if (rtc->ops->set_offset) set_bit(RTC_FEATURE_CORRECTION, rtc->features); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 47fd1c2d3a57..1fd9c6a21ebe 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -110,8 +110,6 @@ struct rtc_device { struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ int pie_enabled; struct work_struct irqwork; - /* Some hardware can't support UIE mode */ - int uie_unsupported; /* * This offset specifies the update timing of the RTC. -- cgit v1.2.3-71-gd317 From de7a9e949f4f094741f708cd05572f932d009d02 Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Wed, 23 Mar 2022 22:15:49 +0530 Subject: drivers/nvdimm: Fix build failure when CONFIG_PERF_EVENTS is not set The following build failure occurs when CONFIG_PERF_EVENTS is not set, as the generic pmu functions are not visible in that scenario. |-- s390-randconfig-r044-20220313 | |-- nd_perf.c:(.text):undefined-reference-to-perf_pmu_migrate_context | |-- nd_perf.c:(.text):undefined-reference-to-perf_pmu_register | `-- nd_perf.c:(.text):undefined-reference-to-perf_pmu_unregister A similar build failure occurs on the nds32 architecture: nd_perf.c:(.text+0x21e): undefined reference to `perf_pmu_migrate_context' nd_perf.c:(.text+0x434): undefined reference to `perf_pmu_register' nd_perf.c:(.text+0x57c): undefined reference to `perf_pmu_unregister' Fix this issue by adding a check for the CONFIG_PERF_EVENTS config option and disabling the nvdimm perf interface in case this config is not set. Also remove the declarations of perf_pmu_migrate_context, perf_pmu_register and perf_pmu_unregister from nd.h: these are common pmu functions that are already declared in perf_event.h, and since the nvdimm perf interface is disabled when CONFIG_PERF_EVENTS is not set, there is no need to declare them in nd.h. Also move the platform_device header inclusion from nd.h to nd_perf.c, and add stub functions for register_nvdimm_pmu and unregister_nvdimm_pmu to handle the CONFIG_PERF_EVENTS=n case.
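The shape of the fix is the standard config-gated stub pattern; a generic sketch of it follows (the example_* names are placeholders, not the nvdimm API — the real version is in the nd.h hunk below):

struct example_dev;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_register(struct example_dev *dev);
void example_unregister(struct example_dev *dev);
#else
static inline int example_register(struct example_dev *dev)
{
	return -ENXIO;	/* feature compiled out, mirroring the nd.h stubs */
}
static inline void example_unregister(struct example_dev *dev) { }
#endif

Callers then compile unchanged whether or not the option is set, which is exactly what lets nd_perf.o drop out of the build.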
Fixes: 0fab1ba6ad6b ("drivers/nvdimm: Add perf interface to expose nvdimm performance stats") (Commit id based on libnvdimm-for-next tree) Signed-off-by: Kajol Jain Link: https://lore.kernel.org/all/62317124.YBQFU33+s%2FwdvWGj%25lkp@intel.com/ Reported-by: kernel test robot Link: https://lore.kernel.org/r/20220323164550.109768-1-kjain@linux.ibm.com Signed-off-by: Dan Williams --- drivers/nvdimm/Makefile | 2 +- drivers/nvdimm/nd_perf.c | 1 + include/linux/nd.h | 16 ++++++++++++---- 3 files changed, 14 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile index 3fb806748716..ba0296dca9db 100644 --- a/drivers/nvdimm/Makefile +++ b/drivers/nvdimm/Makefile @@ -15,7 +15,7 @@ nd_e820-y := e820.o libnvdimm-y := core.o libnvdimm-y += bus.o libnvdimm-y += dimm_devs.o -libnvdimm-y += nd_perf.o +libnvdimm-$(CONFIG_PERF_EVENTS) += nd_perf.o libnvdimm-y += dimm.o libnvdimm-y += region_devs.o libnvdimm-y += region.o diff --git a/drivers/nvdimm/nd_perf.c b/drivers/nvdimm/nd_perf.c index 314415894acf..433bbb68ae64 100644 --- a/drivers/nvdimm/nd_perf.c +++ b/drivers/nvdimm/nd_perf.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) "nvdimm_pmu: " fmt #include +#include #define EVENT(_name, _code) enum{_name = _code} diff --git a/include/linux/nd.h b/include/linux/nd.h index 7b2ccbdc1cbc..b9771ba1ef87 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -9,7 +9,6 @@ #include #include #include -#include enum nvdimm_event { NVDIMM_REVALIDATE_POISON, @@ -57,15 +56,24 @@ struct nvdimm_pmu { struct cpumask arch_cpumask; }; +struct platform_device; + +#ifdef CONFIG_PERF_EVENTS extern ssize_t nvdimm_events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev); void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu); -void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); -int perf_pmu_register(struct pmu *pmu, const char *name, int type); -void perf_pmu_unregister(struct pmu *pmu); + +#else +static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev) +{ + return -ENXIO; +} + +static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { } +#endif struct nd_device_driver { struct device_driver drv; -- cgit v1.2.3-71-gd317 From 179fd6ba3bacbf7b19cbdf9b14be109d54318394 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 23 Mar 2022 16:05:32 -0700 Subject: Documentation/sparse: add hints about __CHECKER__ Several attributes depend on __CHECKER__, but previously there was no clue in the tree about when __CHECKER__ might be defined. Add hints at the most common places (__kernel, __user, __iomem, __bitwise) and in the sparse documentation. Link: https://lkml.kernel.org/r/20220310220927.245704-3-helgaas@kernel.org Signed-off-by: Bjorn Helgaas Cc: Jonathan Corbet Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: "Michael S . Tsirkin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/dev-tools/sparse.rst | 2 ++ include/linux/compiler_types.h | 1 + include/uapi/linux/types.h | 1 + 3 files changed, 4 insertions(+) (limited to 'include/linux') diff --git a/Documentation/dev-tools/sparse.rst b/Documentation/dev-tools/sparse.rst index 02102be7ff49..dc791c8d84d1 100644 --- a/Documentation/dev-tools/sparse.rst +++ b/Documentation/dev-tools/sparse.rst @@ -100,3 +100,5 @@ have already built it. The optional make variable CF can be used to pass arguments to sparse. 
The build system passes -Wbitwise to sparse automatically. + +Note that sparse defines the __CHECKER__ preprocessor symbol. diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3c1795fdb568..4b3915ce38a4 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -4,6 +4,7 @@ #ifndef __ASSEMBLY__ +/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */ #ifdef __CHECKER__ /* address spaces */ # define __kernel __attribute__((address_space(0))) diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index 71696f424ac8..c4dc597f3dcf 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -19,6 +19,7 @@ * any application/library that wants linux/types.h. */ +/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */ #ifdef __CHECKER__ #define __bitwise __attribute__((bitwise)) #else -- cgit v1.2.3-71-gd317 From 14e83077d55ff4b88fe39f5e98fb8230c2ccb4fb Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Wed, 23 Mar 2022 16:05:41 -0700 Subject: include: drop pointless __compiler_offsetof indirection (1) compiler_types.h is unconditionally included via an -include flag (see scripts/Makefile.lib), and it defines __compiler_offsetof unconditionally. So testing for definedness of __compiler_offsetof is mostly pointless. (2) Every relevant compiler provides __builtin_offsetof (even sparse has had that for 14 years), and if for whatever reason one would end up picking up the poor man's fallback definition (a .c file compiled with completely custom CFLAGS?), newer clang versions won't treat the result as an Integer Constant Expression, so if used in a place where such is required (static initializer or static_assert), one would get errors like t.c:11:16: error: static_assert expression is not an integral constant expression t.c:11:16: note: cast that performs the conversions of a reinterpret_cast is not allowed in a constant expression t.c:4:33: note: expanded from macro 'offsetof' #define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) So just define offsetof unconditionally and directly in terms of __builtin_offsetof. Link: https://lkml.kernel.org/r/20220202102147.326672-1-linux@rasmusvillemoes.dk Signed-off-by: Rasmus Villemoes Reviewed-by: Miguel Ojeda Reviewed-by: Nathan Chancellor Reviewed-by: Kees Cook Acked-by: Nick Desaulniers Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compiler_types.h | 2 -- include/linux/stddef.h | 6 +----- 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 4b3915ce38a4..232dbd97f8b1 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -138,8 +138,6 @@ struct ftrace_likely_data { */ #define __naked __attribute__((__naked__)) notrace -#define __compiler_offsetof(a, b) __builtin_offsetof(a, b) - /* * Prefer gnu_inline, so that extern inline functions do not emit an * externally visible function.
This makes extern inline behave as per gnu89 diff --git a/include/linux/stddef.h b/include/linux/stddef.h index ca507bd5f808..929d67710cc5 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h @@ -13,11 +13,7 @@ enum { }; #undef offsetof -#ifdef __compiler_offsetof -#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER) -#else -#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) -#endif +#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER) /** * sizeof_field() - Report the size of a struct field in bytes -- cgit v1.2.3-71-gd317 From f334f5668bedf7307f6df1d98b14f55902931926 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 23 Mar 2022 16:05:44 -0700 Subject: ilog2: force inlining of __ilog2_u32() and __ilog2_u64() Building a kernel with CONFIG_CC_OPTIMISE_FOR_SIZE leads to __ilog2_u32() being duplicated 50 times and __ilog2_u64() 3 times in vmlinux on a tiny powerpc32 config. As __ilog2_u32() is only 2 instructions, it is not worth keeping out of line, so force inlining. Although the u64 version is a bit bigger, there is still a small benefit in keeping it inlined. On a 64-bit config there's a real benefit. With this change the size of vmlinux text is reduced by 1 kbyte, which is approx 50% more than the size of the removed functions. Before the patch, there is for instance: c00d2a94 <__ilog2_u32>: c00d2a94: 7c 63 00 34 cntlzw r3,r3 c00d2a98: 20 63 00 1f subfic r3,r3,31 c00d2a9c: 4e 80 00 20 blr c00d36d8 <__order_base_2>: c00d36d8: 28 03 00 01 cmplwi r3,1 c00d36dc: 40 81 00 2c ble c00d3708 <__order_base_2+0x30> c00d36e0: 94 21 ff f0 stwu r1,-16(r1) c00d36e4: 7c 08 02 a6 mflr r0 c00d36e8: 38 63 ff ff addi r3,r3,-1 c00d36ec: 90 01 00 14 stw r0,20(r1) c00d36f0: 4b ff f3 a5 bl c00d2a94 <__ilog2_u32> c00d36f4: 80 01 00 14 lwz r0,20(r1) c00d36f8: 38 63 00 01 addi r3,r3,1 c00d36fc: 7c 08 03 a6 mtlr r0 c00d3700: 38 21 00 10 addi r1,r1,16 c00d3704: 4e 80 00 20 blr c00d3708: 38 60 00 00 li r3,0 c00d370c: 4e 80 00 20 blr With the patch it has become: c00d356c <__order_base_2>: c00d356c: 28 03 00 01 cmplwi r3,1 c00d3570: 40 81 00 14 ble c00d3584 <__order_base_2+0x18> c00d3574: 38 63 ff ff addi r3,r3,-1 c00d3578: 7c 63 00 34 cntlzw r3,r3 c00d357c: 20 63 00 20 subfic r3,r3,32 c00d3580: 4e 80 00 20 blr c00d3584: 38 60 00 00 li r3,0 c00d3588: 4e 80 00 20 blr No more need for __order_base_2() to set up a stack frame and save/restore the caller address. And the following 'add 1' is merged into the subtract. Another typical use of it: c080ff28 : ... c080fff8: 7f c3 f3 78 mr r3,r30 c080fffc: 4b 8f 81 f1 bl c01081ec <__ilog2_u32> c0810000: 38 63 ff f2 addi r3,r3,-14 ... Becomes c080ff1c : ... c080ffec: 7f c3 00 34 cntlzw r3,r30 c080fff0: 20 63 00 11 subfic r3,r3,17 ... Here there is no need to move the r30 argument to r3 and then subtract 14 from the result. Just work on r30 and merge the 'sub 14' with the 'sub from 31'.
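For readers less familiar with the helper: __ilog2_u32() is just fls(n) - 1, i.e. the bit index of the most significant set bit. A user-space analogue, given for illustration only (it uses the GCC/Clang builtin rather than the kernel's fls()):

#include <stdint.h>

static inline int ilog2_u32(uint32_t n)
{
	/*
	 * 31 - clz(n) == fls(n) - 1. n == 0 is not handled, matching the
	 * kernel's note that the fallback need not handle it (and
	 * __builtin_clz(0) is undefined anyway).
	 */
	return 31 - __builtin_clz(n);
}

The cntlzw/subfic pair in the listings above is exactly this count-leading-zeros-plus-subtract sequence, which is why forcing inlining removes the call overhead entirely.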
Link: https://lkml.kernel.org/r/803a2ac3d923ebcfd0dd40f5886b05cae7bb0aba.1644243860.git.christophe.leroy@csgroup.eu Signed-off-by: Christophe Leroy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/log2.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/log2.h b/include/linux/log2.h index df0b155c2141..9f30d087a128 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -18,7 +18,7 @@ * - the arch is not required to handle n==0 if implementing the fallback */ #ifndef CONFIG_ARCH_HAS_ILOG2_U32 -static inline __attribute__((const)) +static __always_inline __attribute__((const)) int __ilog2_u32(u32 n) { return fls(n) - 1; @@ -26,7 +26,7 @@ int __ilog2_u32(u32 n) #endif #ifndef CONFIG_ARCH_HAS_ILOG2_U64 -static inline __attribute__((const)) +static __always_inline __attribute__((const)) int __ilog2_u64(u64 n) { return fls64(n) - 1; -- cgit v1.2.3-71-gd317 From 25cb5b7ac6a7f7943e22a6831b74a6aa964d2a0c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 23 Mar 2022 16:05:47 -0700 Subject: bitfield: add explicit inclusions to the example MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's not obvious that bitfield.h doesn't guarantee the bits.h inclusion and the example in the former is confusing. Some developers think that it's okay to just include bitfield.h to get it working. Change example to explicitly include necessary headers in order to avoid confusion. Link: https://lkml.kernel.org/r/20220207123341.47533-1-andriy.shevchenko@linux.intel.com Fixes: 3e9b3112ec74 ("add basic register-field manipulation macros") Depends-on: 8bd9cb51daac ("locking/atomics, asm-generic: Move some macros from to a new file") Signed-off-by: Andy Shevchenko Reported-by: Jan Dąbroś Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/bitfield.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 6093fa6db260..c9be1657f03d 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -19,6 +19,9 @@ * * Example: * + * #include + * #include + * * #define REG_FIELD_A GENMASK(6, 0) * #define REG_FIELD_B BIT(7) * #define REG_FIELD_C GENMASK(15, 8) -- cgit v1.2.3-71-gd317 From abc7da58c4b388ab9f6a756c0f297c29d3af665f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 23 Mar 2022 16:06:11 -0700 Subject: init.h: improve __setup and early_param documentation Igor noted in [1] that there are quite a few __setup() handling functions that return incorrect values. Doing this can be harmless, but it can also cause strings to be added to init's argument or environment list, polluting them. Since __setup() handling and return values are not documented, first add documentation for that. Also add more documentation for early_param() handling and return values. For __setup() functions, returning 0 (not handled) has questionable value if it is just a malformed option value, as in rodata=junk since returning 0 would just cause "rodata=junk" to be added to init's environment unnecessarily: Run /sbin/init as init process with arguments: /sbin/init with environment: HOME=/ TERM=linux splash=native rodata=junk Also, there are no recommendations on whether to print a warning when an unknown parameter value is seen. I am not addressing that here. 
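A sketch of a __setup() handler following the convention being documented here (the "example=" option name is hypothetical):

static bool example_enabled;

static int __init example_setup(char *str)
{
	if (str && !strcmp(str, "on"))
		example_enabled = true;
	/* Return 1: handled, so "example=..." is not passed on to init. */
	return 1;
}
__setup("example=", example_setup);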
[1] lore.kernel.org/r/64644a2f-4a20-bab3-1e15-3b2cdd0defe3@omprussia.ru Link: https://lkml.kernel.org/r/20220221050852.1147-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap Reported-by: Igor Zhbanov Cc: Ingo Molnar Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/init.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/init.h b/include/linux/init.h index d82b4b2e1d25..baf0b29a7010 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -320,12 +320,19 @@ struct obs_kernel_param { __aligned(__alignof__(struct obs_kernel_param)) \ = { __setup_str_##unique_id, fn, early } +/* + * NOTE: __setup functions return values: + * @fn returns 1 (or non-zero) if the option argument is "handled" + * and returns 0 if the option argument is "not handled". + */ #define __setup(str, fn) \ __setup_param(str, fn, fn, 0) /* - * NOTE: fn is as per module_param, not __setup! - * Emits warning if fn returns non-zero. + * NOTE: @fn is as per module_param, not __setup! + * I.e., @fn returns 0 for no error or non-zero for error + * (possibly @fn returns a -errno value, but it does not matter). + * Emits warning if @fn returns non-zero. */ #define early_param(str, fn) \ __setup_param(str, fn, fn, 1) -- cgit v1.2.3-71-gd317 From f05fa10901aa4b11a84257ecb7ac7f8fe7c4ee1b Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Wed, 23 Mar 2022 16:06:33 -0700 Subject: kexec: make crashk_res, crashk_low_res and crash_notes symbols always visible Patch series "kexec: use IS_ENABLED(CONFIG_KEXEC_CORE) instead of #ifdef", v2. Replace the conditional compilation using "#ifdef CONFIG_KEXEC_CORE" by a check for "IS_ENABLED(CONFIG_KEXEC_CORE)", to simplify the code and increase compile coverage. I only modified x86, arm, arm64 and riscv, other architectures such as sh, powerpc and s390 are better to be kept kexec code as-is so they are not touched. This patch (of 5): Make the forward declarations of crashk_res, crashk_low_res and crash_notes always visible. Code referring to these symbols can then just check for IS_ENABLED(CONFIG_KEXEC_CORE), instead of requiring conditional compilation using an #ifdef, thus preparing to increase compile coverage and simplify the code. Link: https://lkml.kernel.org/r/20211206160514.2000-1-jszhang@kernel.org Link: https://lkml.kernel.org/r/20211206160514.2000-2-jszhang@kernel.org Signed-off-by: Jisheng Zhang Acked-by: Baoquan He Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Albert Ou Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: H. Peter Anvin Cc: Eric W. Biederman Cc: Alexandre Ghiti Cc: Palmer Dabbelt Cc: Russell King (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kexec.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 0c994ae37729..58d1b58a971e 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -20,6 +20,12 @@ #include +/* Location of a reserved region to hold the crash kernel. 
+ */ +extern struct resource crashk_res; +extern struct resource crashk_low_res; +extern note_buf_t __percpu *crash_notes; + #ifdef CONFIG_KEXEC_CORE #include #include @@ -350,12 +356,6 @@ extern int kexec_load_disabled; #define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ KEXEC_FILE_NO_INITRAMFS) -/* Location of a reserved region to hold the crash kernel. - */ -extern struct resource crashk_res; -extern struct resource crashk_low_res; -extern note_buf_t __percpu *crash_notes; - /* flag to track if kexec reboot is in progress */ extern bool kexec_in_progress; -- cgit v1.2.3-71-gd317 From d645552e9bd96671079b27015294ec7f9748fa2b Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 24 Mar 2022 15:02:40 +0100 Subject: netfilter: egress: Report interface as outgoing Otherwise packets in egress chains seem like they are being received by the interface, not sent out via it. Fixes: 42df6e1d221dd ("netfilter: Introduce egress hook") Signed-off-by: Phil Sutter Signed-off-by: Florian Westphal --- include/linux/netfilter_netdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h index e6487a691136..8676316547cc 100644 --- a/include/linux/netfilter_netdev.h +++ b/include/linux/netfilter_netdev.h @@ -99,7 +99,7 @@ static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc, return skb; nf_hook_state_init(&state, NF_NETDEV_EGRESS, - NFPROTO_NETDEV, dev, NULL, NULL, + NFPROTO_NETDEV, NULL, dev, NULL, dev_net(dev), NULL); /* nf assumes rcu_read_lock, not just read_lock_bh */ -- cgit v1.2.3-71-gd317 From bb43b14b576228c580bdc7e1aeeded54d540b5ef Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 24 Mar 2022 18:09:49 -0700 Subject: mm: delete __ClearPageWaiters() The PG_waiters bit is not included in PAGE_FLAGS_CHECK_AT_FREE, and vmscan.c's free_unref_page_list() callers rely on that not to generate bad_page() alerts. So __page_cache_release(), put_pages_list() and release_pages() (and presumably copy-and-pasted free_zone_device_page()) are redundant and misleading to make a special point of clearing it (as the "__" implies, it could only safely be used on the freeing path). Delete __ClearPageWaiters(). Remark on this in one of the "possible" comments in folio_wake_bit(), and delete the superfluous comments. 
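For context on what is being deleted: the __CLEARPAGEFLAG() policy macro generates a non-atomic clear helper, roughly as follows (a simplified sketch of the page-flags machinery, not the literal macro expansion):

static __always_inline void __ClearPageWaiters(struct page *page)
{
	/* Non-atomic ("__") variant; only safe on the freeing path. */
	__clear_bit(PG_waiters, &page->flags);
}

Dropping the __CLEARPAGEFLAG() line from page-flags.h removes this helper while keeping the ordinary set/clear/test accessors.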
Link: https://lkml.kernel.org/r/3eafa969-5b1a-accf-88fe-318784c791a@google.com Signed-off-by: Hugh Dickins Tested-by: Yu Zhao Reviewed-by: Yang Shi Reviewed-by: David Hildenbrand Cc: Matthew Wilcox Cc: Nicholas Piggin Cc: Yu Zhao Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 2 +- mm/filemap.c | 23 ++++++++--------------- mm/memremap.c | 2 -- mm/swap.c | 4 ---- 4 files changed, 9 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 88fe1d759cdd..9d8eeaa67d05 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -481,7 +481,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname) __PAGEFLAG(Locked, locked, PF_NO_TAIL) -PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) +PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) PAGEFLAG(Referenced, referenced, PF_HEAD) TESTCLEARFLAG(Referenced, referenced, PF_HEAD) diff --git a/mm/filemap.c b/mm/filemap.c index d2e6a79fe69d..98dfeff82ef0 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1185,24 +1185,17 @@ static void folio_wake_bit(struct folio *folio, int bit_nr) } /* - * It is possible for other pages to have collided on the waitqueue - * hash, so in that case check for a page match. That prevents a long- - * term waiter + * It's possible to miss clearing waiters here, when we woke our page + * waiters, but the hashed waitqueue has waiters for other pages on it. + * That's okay, it's a rare case. The next waker will clear it. * - * It is still possible to miss a case here, when we woke page waiters - * and removed them from the waitqueue, but there are still other - * page waiters. + * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE, + * other), the flag may be cleared in the course of freeing the page; + * but that is not required for correctness. */ - if (!waitqueue_active(q) || !key.page_match) { + if (!waitqueue_active(q) || !key.page_match) folio_clear_waiters(folio); - /* - * It's possible to miss clearing Waiters here, when we woke - * our page waiters, but the hashed waitqueue has waiters for - * other pages on it. - * - * That's okay, it's a rare case. The next waker will clear it. 
- */ - } + spin_unlock_irqrestore(&q->lock, flags); } diff --git a/mm/memremap.c b/mm/memremap.c index c17eca4a48ca..af0223605e69 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -456,8 +456,6 @@ void free_zone_device_page(struct page *page) if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free)) return; - __ClearPageWaiters(page); - mem_cgroup_uncharge(page_folio(page)); /* diff --git a/mm/swap.c b/mm/swap.c index 5b30045207e1..bceff0cb559c 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -97,7 +97,6 @@ static void __page_cache_release(struct page *page) mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); } - __ClearPageWaiters(page); } static void __put_single_page(struct page *page) @@ -152,7 +151,6 @@ void put_pages_list(struct list_head *pages) continue; } /* Cannot be PageLRU because it's passed to us using the lru */ - __ClearPageWaiters(page); } free_unref_page_list(pages); @@ -971,8 +969,6 @@ void release_pages(struct page **pages, int nr) count_vm_event(UNEVICTABLE_PGCLEARED); } - __ClearPageWaiters(page); - list_add(&page->lru, &pages_to_free); } if (lruvec) -- cgit v1.2.3-71-gd317 From 7c13c163e036c646b77753deacfe2f5478b654bc Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:10:10 -0700 Subject: kasan, page_alloc: merge kasan_free_pages into free_pages_prepare Currently, the code responsible for initializing and poisoning memory in free_pages_prepare() is scattered across two locations: kasan_free_pages() for HW_TAGS KASAN and free_pages_prepare() itself. This is confusing. This and a few following patches combine the code from these two locations. Along the way, these patches also simplify the performed checks to make them easier to follow. Replaces the only caller of kasan_free_pages() with its implementation. As kasan_has_integrated_init() is only true when CONFIG_KASAN_HW_TAGS is enabled, moving the code does no functional changes. This patch is not useful by itself but makes the simplifications in the following patches easier to follow. Link: https://lkml.kernel.org/r/303498d15840bb71905852955c6e2390ecc87139.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Alexander Potapenko Acked-by: Marco Elver Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 8 -------- mm/kasan/common.c | 2 +- mm/kasan/hw_tags.c | 11 ----------- mm/page_alloc.c | 6 ++++-- 4 files changed, 5 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b6a93261c92a..dd2161af84e9 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -85,7 +85,6 @@ static inline void kasan_disable_current(void) {} #ifdef CONFIG_KASAN_HW_TAGS void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags); -void kasan_free_pages(struct page *page, unsigned int order); #else /* CONFIG_KASAN_HW_TAGS */ @@ -96,13 +95,6 @@ static __always_inline void kasan_alloc_pages(struct page *page, BUILD_BUG(); } -static __always_inline void kasan_free_pages(struct page *page, - unsigned int order) -{ - /* Only available for integrated init. 
*/ - BUILD_BUG(); -} - #endif /* CONFIG_KASAN_HW_TAGS */ static inline bool kasan_has_integrated_init(void) diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 92196562687b..a0082fad48b1 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -387,7 +387,7 @@ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip) } /* - * The object will be poisoned by kasan_free_pages() or + * The object will be poisoned by kasan_poison_pages() or * kasan_slab_free_mempool(). */ diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 7355cb534e4f..0b8225add2e4 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -213,17 +213,6 @@ void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags) } } -void kasan_free_pages(struct page *page, unsigned int order) -{ - /* - * This condition should match the one in free_pages_prepare() in - * page_alloc.c. - */ - bool init = want_init_on_free(); - - kasan_poison_pages(page, order, init); -} - #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) void kasan_enable_tagging_sync(void) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0721ff0c90be..d16df446d2ca 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1364,15 +1364,17 @@ static __always_inline bool free_pages_prepare(struct page *page, /* * As memory initialization might be integrated into KASAN, - * kasan_free_pages and kernel_init_free_pages must be + * KASAN poisoning and memory initialization code must be * kept together to avoid discrepancies in behavior. * * With hardware tag-based KASAN, memory tags must be set before the * page becomes unavailable via debug_pagealloc or arch_free_page. */ if (kasan_has_integrated_init()) { + bool init = want_init_on_free(); + if (!skip_kasan_poison) - kasan_free_pages(page, order); + kasan_poison_pages(page, order, init); } else { bool init = want_init_on_free(); -- cgit v1.2.3-71-gd317 From c82ce3195fd17e57a370e4dc8f36c195b8807b95 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:10:22 -0700 Subject: mm: clarify __GFP_ZEROTAGS comment __GFP_ZEROTAGS is intended as an optimization: if memory is zeroed during allocation, it's possible to set memory tags at the same time with little performance impact. Clarify this intention of __GFP_ZEROTAGS in the comment. Link: https://lkml.kernel.org/r/cdffde013973c5634a447513e10ec0d21e8eee29.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 20f6fbe12993..72dc4ca75cf3 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -232,8 +232,10 @@ struct vm_area_struct; * * %__GFP_ZERO returns a zeroed page on success. * - * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if - * __GFP_ZERO is set. + * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself + * is being zeroed (either via __GFP_ZERO or via init_on_alloc). This flag is + * intended for optimization: setting memory tags at the same time as zeroing + * memory has minimal additional performace impact. * * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned * on deallocation. 
Typically used for userspace pages. Currently only has an -- cgit v1.2.3-71-gd317 From b42090ae6f3aa07b0a39403545d688489548a6a8 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:10:31 -0700 Subject: kasan, page_alloc: merge kasan_alloc_pages into post_alloc_hook Currently, the code responsible for initializing and poisoning memory in post_alloc_hook() is scattered across two locations: kasan_alloc_pages() hook for HW_TAGS KASAN and post_alloc_hook() itself. This is confusing. This and a few following patches combine the code from these two locations. Along the way, these patches do a step-by-step restructure the many performed checks to make them easier to follow. Replace the only caller of kasan_alloc_pages() with its implementation. As kasan_has_integrated_init() is only true when CONFIG_KASAN_HW_TAGS is enabled, moving the code does no functional changes. Also move init and init_tags variables definitions out of kasan_has_integrated_init() clause in post_alloc_hook(), as they have the same values regardless of what the if condition evaluates to. This patch is not useful by itself but makes the simplifications in the following patches easier to follow. Link: https://lkml.kernel.org/r/5ac7e0b30f5cbb177ec363ddd7878a3141289592.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 9 --------- mm/kasan/common.c | 2 +- mm/kasan/hw_tags.c | 22 ---------------------- mm/page_alloc.c | 20 +++++++++++++++----- 4 files changed, 16 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index dd2161af84e9..4ed94616c8f0 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -84,17 +84,8 @@ static inline void kasan_disable_current(void) {} #ifdef CONFIG_KASAN_HW_TAGS -void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags); - #else /* CONFIG_KASAN_HW_TAGS */ -static __always_inline void kasan_alloc_pages(struct page *page, - unsigned int order, gfp_t flags) -{ - /* Only available for integrated init. */ - BUILD_BUG(); -} - #endif /* CONFIG_KASAN_HW_TAGS */ static inline bool kasan_has_integrated_init(void) diff --git a/mm/kasan/common.c b/mm/kasan/common.c index a0082fad48b1..d9079ec11f31 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -538,7 +538,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size, return NULL; /* - * The object has already been unpoisoned by kasan_alloc_pages() for + * The object has already been unpoisoned by kasan_unpoison_pages() for * alloc_pages() or by kasan_krealloc() for krealloc(). */ diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index c643740b8599..76cf2b6229c7 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -192,28 +192,6 @@ void __init kasan_init_hw_tags(void) kasan_stack_collection_enabled() ? "on" : "off"); } -void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags) -{ - /* - * This condition should match the one in post_alloc_hook() in - * page_alloc.c. 
- */ - bool init = !want_init_on_free() && want_init_on_alloc(flags); - bool init_tags = init && (flags & __GFP_ZEROTAGS); - - if (flags & __GFP_SKIP_KASAN_POISON) - SetPageSkipKASanPoison(page); - - if (init_tags) { - int i; - - for (i = 0; i != 1 << order; ++i) - tag_clear_highpage(page + i); - } else { - kasan_unpoison_pages(page, order, init); - } -} - #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) void kasan_enable_tagging_sync(void) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d2d6e48d2d83..db82facf14e7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2346,6 +2346,9 @@ static inline bool check_new_pcp(struct page *page, unsigned int order) inline void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags) { + bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags); + bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS); + set_page_private(page, 0); set_page_refcounted(page); @@ -2361,15 +2364,22 @@ inline void post_alloc_hook(struct page *page, unsigned int order, /* * As memory initialization might be integrated into KASAN, - * kasan_alloc_pages and kernel_init_free_pages must be + * KASAN unpoisoning and memory initializion code must be * kept together to avoid discrepancies in behavior. */ if (kasan_has_integrated_init()) { - kasan_alloc_pages(page, order, gfp_flags); - } else { - bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags); - bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS); + if (gfp_flags & __GFP_SKIP_KASAN_POISON) + SetPageSkipKASanPoison(page); + + if (init_tags) { + int i; + for (i = 0; i != 1 << order; ++i) + tag_clear_highpage(page + i); + } else { + kasan_unpoison_pages(page, order, init); + } + } else { kasan_unpoison_pages(page, order, init); if (init_tags) { -- cgit v1.2.3-71-gd317 From 63840de296472f3914bb933b11ba2b764590755e Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:10:52 -0700 Subject: kasan, x86, arm64, s390: rename functions for modules shadow Rename kasan_free_shadow to kasan_free_module_shadow and kasan_module_alloc to kasan_alloc_module_shadow. These functions are used to allocate/free shadow memory for kernel modules when KASAN_VMALLOC is not enabled. The new names better reflect their purpose. Also reword the comment next to their declaration to improve clarity. 
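As background, the module shadow these helpers manage is addressed by the usual generic-KASAN translation, schematically (simplified from the real kasan_mem_to_shadow(); the constants shown are the generic-mode ones):

static inline void *example_mem_to_shadow(const void *addr)
{
	/* One shadow byte covers 8 bytes of memory in generic mode. */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

The renamed kasan_alloc_module_shadow()/kasan_free_module_shadow() simply allocate and free the shadow backing for the module mapping when KASAN_VMALLOC cannot do it generically.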
Link: https://lkml.kernel.org/r/36db32bde765d5d0b856f77d2d806e838513fe84.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Catalin Marinas Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/kernel/module.c | 2 +- arch/s390/kernel/module.c | 2 +- arch/x86/kernel/module.c | 2 +- include/linux/kasan.h | 14 +++++++------- mm/kasan/shadow.c | 4 ++-- mm/vmalloc.c | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 309a27553c87..d3a1fa818348 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -58,7 +58,7 @@ void *module_alloc(unsigned long size) PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); - if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) { + if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) { vfree(p); return NULL; } diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index b032e556eeb7..a7aefc278909 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -45,7 +45,7 @@ void *module_alloc(unsigned long size) p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END, gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE, __builtin_return_address(0)); - if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) { + if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) { vfree(p); return NULL; } diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 96d7c27b7093..504ea65987e8 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -78,7 +78,7 @@ void *module_alloc(unsigned long size) MODULES_END, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE, __builtin_return_address(0)); - if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) { + if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) { vfree(p); return NULL; } diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 4ed94616c8f0..b450c7653137 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -433,17 +433,17 @@ static inline void kasan_populate_early_vm_area_shadow(void *start, !defined(CONFIG_KASAN_VMALLOC) /* - * These functions provide a special case to support backing module - * allocations with real shadow memory. With KASAN vmalloc, the special - * case is unnecessary, as the work is handled in the generic case. + * These functions allocate and free shadow memory for kernel modules. + * They are only required when KASAN_VMALLOC is not supported, as otherwise + * shadow memory is allocated by the generic vmalloc handlers. 
*/ -int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask); -void kasan_free_shadow(const struct vm_struct *vm); +int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask); +void kasan_free_module_shadow(const struct vm_struct *vm); #else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ -static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; } -static inline void kasan_free_shadow(const struct vm_struct *vm) {} +static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; } +static inline void kasan_free_module_shadow(const struct vm_struct *vm) {} #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 94136f84b449..e5c4393eb861 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -498,7 +498,7 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, #else /* CONFIG_KASAN_VMALLOC */ -int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) +int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { void *ret; size_t scaled_size; @@ -534,7 +534,7 @@ int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) return -ENOMEM; } -void kasan_free_shadow(const struct vm_struct *vm) +void kasan_free_module_shadow(const struct vm_struct *vm) { if (vm->flags & VM_KASAN) vfree(kasan_mem_to_shadow(vm->addr)); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 99e0f3e8d1a5..feecd614bb77 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2547,7 +2547,7 @@ struct vm_struct *remove_vm_area(const void *addr) va->vm = NULL; spin_unlock(&vmap_area_lock); - kasan_free_shadow(vm); + kasan_free_module_shadow(vm); free_unmap_vmap_area(va); return vm; -- cgit v1.2.3-71-gd317 From 0b7ccc70ee1d5b499d1626c9d28f729507b1c036 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:10:55 -0700 Subject: kasan, vmalloc: drop outdated VM_KASAN comment The comment about VM_KASAN in include/linux/vmalloc.c is outdated. VM_KASAN is currently only used to mark vm_areas allocated for kernel modules when CONFIG_KASAN_VMALLOC is disabled. Drop the comment. Link: https://lkml.kernel.org/r/780395afea83a147b3b5acc36cf2e38f7f8479f9.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Alexander Potapenko Acked-by: Marco Elver Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmalloc.h | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 5a0c3b556848..2ca95c7db463 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -35,17 +35,6 @@ struct notifier_block; /* in notifier.h */ #define VM_DEFER_KMEMLEAK 0 #endif -/* - * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC. - * - * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after - * shadow memory has been mapped. It's used to handle allocation errors so that - * we don't try to poison shadow on free if it was never allocated. - * - * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to - * determine which allocations need the module shadow freed. 
- */ - /* bits [20..32] reserved for arch specific ioremap internals */ /* -- cgit v1.2.3-71-gd317 From 5bd9bae22a455321327982d0b595a7e76d425fd0 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:10:58 -0700 Subject: kasan: reorder vmalloc hooks Group functions that [de]populate shadow memory for vmalloc. Group functions that [un]poison memory for vmalloc. This patch does no functional changes but prepares KASAN code for adding vmalloc support to HW_TAGS KASAN. Link: https://lkml.kernel.org/r/aeef49eb249c206c4c9acce2437728068da74c28.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Alexander Potapenko Acked-by: Marco Elver Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 20 +++++++++----------- mm/kasan/shadow.c | 43 ++++++++++++++++++++++--------------------- 2 files changed, 31 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b450c7653137..5f9ed1372eef 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -397,34 +397,32 @@ static inline void kasan_init_hw_tags(void) { } #ifdef CONFIG_KASAN_VMALLOC +void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); int kasan_populate_vmalloc(unsigned long addr, unsigned long size); -void kasan_poison_vmalloc(const void *start, unsigned long size); -void kasan_unpoison_vmalloc(const void *start, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); -void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); +void kasan_unpoison_vmalloc(const void *start, unsigned long size); +void kasan_poison_vmalloc(const void *start, unsigned long size); #else /* CONFIG_KASAN_VMALLOC */ +static inline void kasan_populate_early_vm_area_shadow(void *start, + unsigned long size) { } static inline int kasan_populate_vmalloc(unsigned long start, unsigned long size) { return 0; } - -static inline void kasan_poison_vmalloc(const void *start, unsigned long size) -{ } -static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size) -{ } static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, - unsigned long free_region_end) {} + unsigned long free_region_end) { } -static inline void kasan_populate_early_vm_area_shadow(void *start, - unsigned long size) +static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size) +{ } +static inline void kasan_poison_vmalloc(const void *start, unsigned long size) { } #endif /* CONFIG_KASAN_VMALLOC */ diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index e5c4393eb861..bf7ab62fbfb9 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -345,27 +345,6 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size) return 0; } -/* - * Poison the shadow for a vmalloc region. Called as part of the - * freeing process at the time the region is freed. 
- */ -void kasan_poison_vmalloc(const void *start, unsigned long size) -{ - if (!is_vmalloc_or_module_addr(start)) - return; - - size = round_up(size, KASAN_GRANULE_SIZE); - kasan_poison(start, size, KASAN_VMALLOC_INVALID, false); -} - -void kasan_unpoison_vmalloc(const void *start, unsigned long size) -{ - if (!is_vmalloc_or_module_addr(start)) - return; - - kasan_unpoison(start, size, false); -} - static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr, void *unused) { @@ -496,6 +475,28 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, } } + +void kasan_unpoison_vmalloc(const void *start, unsigned long size) +{ + if (!is_vmalloc_or_module_addr(start)) + return; + + kasan_unpoison(start, size, false); +} + +/* + * Poison the shadow for a vmalloc region. Called as part of the + * freeing process at the time the region is freed. + */ +void kasan_poison_vmalloc(const void *start, unsigned long size) +{ + if (!is_vmalloc_or_module_addr(start)) + return; + + size = round_up(size, KASAN_GRANULE_SIZE); + kasan_poison(start, size, KASAN_VMALLOC_INVALID, false); +} + #else /* CONFIG_KASAN_VMALLOC */ int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) -- cgit v1.2.3-71-gd317 From 579fb0ac085be2015529a5f7bd1061899a6374de Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:11:01 -0700 Subject: kasan: add wrappers for vmalloc hooks Add wrappers around functions that [un]poison memory for vmalloc allocations. These functions will be used by HW_TAGS KASAN and therefore need to be disabled when kasan=off command line argument is provided. This patch does no functional changes for software KASAN modes. Link: https://lkml.kernel.org/r/3b8728eac438c55389fb0f9a8a2145d71dd77487.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Alexander Potapenko Acked-by: Marco Elver Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 17 +++++++++++++++-- mm/kasan/shadow.c | 5 ++--- 2 files changed, 17 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 5f9ed1372eef..a6d6fdefb278 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -403,8 +403,21 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); -void kasan_unpoison_vmalloc(const void *start, unsigned long size); -void kasan_poison_vmalloc(const void *start, unsigned long size); +void __kasan_unpoison_vmalloc(const void *start, unsigned long size); +static __always_inline void kasan_unpoison_vmalloc(const void *start, + unsigned long size) +{ + if (kasan_enabled()) + __kasan_unpoison_vmalloc(start, size); +} + +void __kasan_poison_vmalloc(const void *start, unsigned long size); +static __always_inline void kasan_poison_vmalloc(const void *start, + unsigned long size) +{ + if (kasan_enabled()) + __kasan_poison_vmalloc(start, size); +} #else /* CONFIG_KASAN_VMALLOC */ diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index bf7ab62fbfb9..39d0b32ebf70 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -475,8 +475,7 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, } } - -void kasan_unpoison_vmalloc(const void *start, unsigned long size) +void __kasan_unpoison_vmalloc(const void *start, 
unsigned long size) { if (!is_vmalloc_or_module_addr(start)) return; @@ -488,7 +487,7 @@ void kasan_unpoison_vmalloc(const void *start, unsigned long size) * Poison the shadow for a vmalloc region. Called as part of the * freeing process at the time the region is freed. */ -void kasan_poison_vmalloc(const void *start, unsigned long size) +void __kasan_poison_vmalloc(const void *start, unsigned long size) { if (!is_vmalloc_or_module_addr(start)) return; -- cgit v1.2.3-71-gd317 From 1d96320f8d5320423737da61e6e6937f6b475b5c Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:11:13 -0700 Subject: kasan, vmalloc: add vmalloc tagging for SW_TAGS Add vmalloc tagging support to SW_TAGS KASAN. - __kasan_unpoison_vmalloc() now assigns a random pointer tag, poisons the virtual mapping accordingly, and embeds the tag into the returned pointer. - __get_vm_area_node() (used by vmalloc() and vmap()) and pcpu_get_vm_areas() save the tagged pointer into vm_struct->addr (note: not into vmap_area->addr). This requires putting kasan_unpoison_vmalloc() after setup_vmalloc_vm[_locked](); otherwise the latter will overwrite the tagged pointer. The tagged pointer then is naturally propagated to vmalloc() and vmap(). - vm_map_ram() returns the tagged pointer directly. As a result of this change, vm_struct->addr is now tagged. Enabling KASAN_VMALLOC with SW_TAGS is not yet allowed. Link: https://lkml.kernel.org/r/4a78f3c064ce905e9070c29733aca1dd254a74f1.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 16 ++++++++++------ mm/kasan/shadow.c | 6 ++++-- mm/vmalloc.c | 14 ++++++++------ 3 files changed, 22 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index a6d6fdefb278..d3efb6cbd0f5 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -403,12 +403,13 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); -void __kasan_unpoison_vmalloc(const void *start, unsigned long size); -static __always_inline void kasan_unpoison_vmalloc(const void *start, - unsigned long size) +void *__kasan_unpoison_vmalloc(const void *start, unsigned long size); +static __always_inline void *kasan_unpoison_vmalloc(const void *start, + unsigned long size) { if (kasan_enabled()) - __kasan_unpoison_vmalloc(start, size); + return __kasan_unpoison_vmalloc(start, size); + return (void *)start; } void __kasan_poison_vmalloc(const void *start, unsigned long size); @@ -433,8 +434,11 @@ static inline void kasan_release_vmalloc(unsigned long start, unsigned long free_region_start, unsigned long free_region_end) { } -static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size) -{ } +static inline void *kasan_unpoison_vmalloc(const void *start, + unsigned long size) +{ + return (void *)start; +} static inline void kasan_poison_vmalloc(const void *start, unsigned long size) { } diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 39d0b32ebf70..5a866f6663fc 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -475,12 +475,14 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, } } -void __kasan_unpoison_vmalloc(const void
*start, unsigned long size) +void *__kasan_unpoison_vmalloc(const void *start, unsigned long size) { if (!is_vmalloc_or_module_addr(start)) - return; + return (void *)start; + start = set_tag(start, kasan_random_tag()); kasan_unpoison(start, size, false); + return (void *)start; } /* diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ab0a0002bfca..1ab1f1b2f5b7 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2231,7 +2231,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node) mem = (void *)addr; } - kasan_unpoison_vmalloc(mem, size); + mem = kasan_unpoison_vmalloc(mem, size); if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, PAGE_SHIFT) < 0) { @@ -2464,10 +2464,10 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, return NULL; } - kasan_unpoison_vmalloc((void *)va->va_start, requested_size); - setup_vmalloc_vm(area, va, flags, caller); + area->addr = kasan_unpoison_vmalloc(area->addr, requested_size); + return area; } @@ -3815,9 +3815,6 @@ retry: for (area = 0; area < nr_vms; area++) { if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) goto err_free_shadow; - - kasan_unpoison_vmalloc((void *)vas[area]->va_start, - sizes[area]); } /* insert all vm's */ @@ -3830,6 +3827,11 @@ retry: } spin_unlock(&vmap_area_lock); + /* mark allocated areas as accessible */ + for (area = 0; area < nr_vms; area++) + vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, + vms[area]->size); + kfree(vas); return vms; -- cgit v1.2.3-71-gd317 From 01d92c7f358ce892279ca830cf6ccf2862a17d1c Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:11:16 -0700 Subject: kasan, vmalloc, arm64: mark vmalloc mappings as pgprot_tagged HW_TAGS KASAN relies on ARM Memory Tagging Extension (MTE). With MTE, a memory region must be mapped as MT_NORMAL_TAGGED to allow setting memory tags via MTE-specific instructions. Add proper protection bits to vmalloc() allocations. These allocations are always backed by page_alloc pages, so the tags will actually be getting set on the corresponding physical memory. 
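The hook introduced below follows the usual arch-override pattern: a generic header supplies a no-op default unless the architecture has already provided its own definition. A minimal standalone C sketch of that pattern follows; PROT_TAGGED_BIT and the pgprot_t stand-in are invented for illustration and are not the kernel's definitions.

#include <stdio.h>

typedef unsigned long pgprot_t;		/* stand-in for the kernel type */
#define PROT_TAGGED_BIT (1UL << 63)	/* hypothetical MT_NORMAL_TAGGED attribute */

/* The "arch" header defines the hook and advertises it via the macro: */
#define arch_vmap_pgprot_tagged arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot | PROT_TAGGED_BIT;	/* arch applies its tagged-memory attribute */
}

/* Generic fallback: only compiled when no arch definition exists. */
#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;			/* no MTE: protection bits unchanged */
}
#endif

int main(void)
{
	pgprot_t prot = 0x7;		/* pretend PAGE_KERNEL */
	printf("before: %#lx after: %#lx\n", prot, arch_vmap_pgprot_tagged(prot));
	return 0;
}

The benefit of the #ifndef fallback is that generic code can call the hook unconditionally; architectures without MTE pay nothing.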
Link: https://lkml.kernel.org/r/983fc33542db2f6b1e77b34ca23448d4640bbb9e.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Co-developed-by: Vincenzo Frascino Signed-off-by: Vincenzo Frascino Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/vmalloc.h | 6 ++++++ include/linux/vmalloc.h | 7 +++++++ mm/vmalloc.c | 9 +++++++++ 3 files changed, 22 insertions(+) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h index b9185503feae..38fafffe699f 100644 --- a/arch/arm64/include/asm/vmalloc.h +++ b/arch/arm64/include/asm/vmalloc.h @@ -25,4 +25,10 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot) #endif +#define arch_vmap_pgprot_tagged arch_vmap_pgprot_tagged +static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot) +{ + return pgprot_tagged(prot); +} + #endif /* _ASM_ARM64_VMALLOC_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 2ca95c7db463..3b1df7da402d 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -115,6 +115,13 @@ static inline int arch_vmap_pte_supported_shift(unsigned long size) } #endif +#ifndef arch_vmap_pgprot_tagged +static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot) +{ + return prot; +} +#endif + /* * Highlevel APIs for driver use */ diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 1ab1f1b2f5b7..8530d86c3e58 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3128,6 +3128,15 @@ again: goto fail; } + /* + * Modify protection bits to allow tagging. + * This must be done before mapping by __vmalloc_area_node(). + */ + if (kasan_hw_tags_enabled() && + pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) + prot = arch_vmap_pgprot_tagged(prot); + + /* Allocate physical pages and map them into vmalloc space. */ addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node); if (!addr) goto fail; -- cgit v1.2.3-71-gd317 From f49d9c5bb15cc5c470097078ec1f26ff605ae1a2 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:11:23 -0700 Subject: kasan, mm: only define ___GFP_SKIP_KASAN_POISON with HW_TAGS Only define the ___GFP_SKIP_KASAN_POISON flag when CONFIG_KASAN_HW_TAGS is enabled. This patch is not useful by itself, but it prepares the code for the addition of new KASAN-specific GFP flags.
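The pattern is easy to demonstrate outside the kernel. In this hedged sketch (FEATURE_X and the bit values are invented), defining the flag as 0 when the feature is compiled out makes every flags check constant-false, so the compiler can delete the branch, and the bit position is handed back to the pool counted by the BITS_SHIFT macro:

#include <stdio.h>

#define FEATURE_X 0	/* flip to 1 to simulate CONFIG_KASAN_HW_TAGS=y */

#if FEATURE_X
#define FLAG_SKIP_POISON 0x1000000u
#else
#define FLAG_SKIP_POISON 0u	/* no bit consumed, checks compile away */
#endif

/* Room for N flag bits: the shift shrinks when the feature is off. */
#define FLAG_BITS_SHIFT (24 + FEATURE_X)

int main(void)
{
	unsigned int flags = 0xffffffffu;

	if (flags & FLAG_SKIP_POISON)	/* dead code when FEATURE_X == 0 */
		puts("skipping poison");
	printf("flag bits used: %d\n", FLAG_BITS_SHIFT);
	return 0;
}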
Link: https://lkml.kernel.org/r/44e5738a584c11801b2b8f1231898918efc8634a.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 8 +++++++- include/trace/events/mmflags.h | 12 +++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 72dc4ca75cf3..75abf8d5bc9b 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -54,7 +54,11 @@ struct vm_area_struct; #define ___GFP_THISNODE 0x200000u #define ___GFP_ACCOUNT 0x400000u #define ___GFP_ZEROTAGS 0x800000u +#ifdef CONFIG_KASAN_HW_TAGS #define ___GFP_SKIP_KASAN_POISON 0x1000000u +#else +#define ___GFP_SKIP_KASAN_POISON 0 +#endif #ifdef CONFIG_LOCKDEP #define ___GFP_NOLOCKDEP 0x2000000u #else @@ -251,7 +255,9 @@ struct vm_area_struct; #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (24 + \ + IS_ENABLED(CONFIG_KASAN_HW_TAGS) + \ + IS_ENABLED(CONFIG_LOCKDEP)) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 116ed4d5d0f8..cb4520374e2c 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -49,12 +49,18 @@ {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ - {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \ - {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\ + {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"} \ + +#ifdef CONFIG_KASAN_HW_TAGS +#define __def_gfpflag_names_kasan \ + , {(unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"} +#else +#define __def_gfpflag_names_kasan +#endif #define show_gfp_flags(flags) \ (flags) ? __print_flags(flags, "|", \ - __def_gfpflag_names \ + __def_gfpflag_names __def_gfpflag_names_kasan \ ) : "none" #ifdef CONFIG_MMU -- cgit v1.2.3-71-gd317 From 53ae233c30a623ff44ff2f83854e92530c5d9fc2 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:11:26 -0700 Subject: kasan, page_alloc: allow skipping unpoisoning for HW_TAGS Add a new GFP flag __GFP_SKIP_KASAN_UNPOISON that allows skipping KASAN unpoisoning for page_alloc allocations. The flag is only effective with HW_TAGS KASAN. This flag will be used by vmalloc code for page_alloc allocations backing vmalloc() mappings in a following patch. The reason to skip KASAN unpoisoning for these pages in page_alloc is that vmalloc code will be unpoisoning them instead. Also reword the comment for __GFP_SKIP_KASAN_POISON.
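The decision this flag feeds into can be modelled in plain C. The sketch below mirrors the logic of the should_skip_kasan_unpoison() helper added in the hunk that follows, with the kernel's config and runtime checks reduced to stub booleans; the flag value is illustrative only:

#include <stdbool.h>
#include <stdio.h>

#define GFP_SKIP_KASAN_UNPOISON 0x1000000u	/* illustrative bit value */

static bool sw_kasan_enabled;	/* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static bool hw_tags_enabled;	/* kasan_hw_tags_enabled() */

static bool should_skip_kasan_unpoison(unsigned int flags, bool init_tags)
{
	/* Software KASAN modes always unpoison on allocation. */
	if (sw_kasan_enabled)
		return false;
	/* No KASAN at all: nothing to unpoison. */
	if (!hw_tags_enabled)
		return true;
	/* HW_TAGS: skip if tags were already cleared, or if explicitly requested. */
	return init_tags || (flags & GFP_SKIP_KASAN_UNPOISON);
}

int main(void)
{
	hw_tags_enabled = true;
	printf("%d\n", should_skip_kasan_unpoison(GFP_SKIP_KASAN_UNPOISON, false)); /* 1 */
	printf("%d\n", should_skip_kasan_unpoison(0, false));			    /* 0 */
	return 0;
}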
Link: https://lkml.kernel.org/r/35c97d77a704f6ff971dd3bfe4be95855744108e.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 21 +++++++++++++-------- include/trace/events/mmflags.h | 5 +++-- mm/page_alloc.c | 31 ++++++++++++++++++++++--------- 3 files changed, 38 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 75abf8d5bc9b..688a39fde43e 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -55,12 +55,14 @@ struct vm_area_struct; #define ___GFP_ACCOUNT 0x400000u #define ___GFP_ZEROTAGS 0x800000u #ifdef CONFIG_KASAN_HW_TAGS -#define ___GFP_SKIP_KASAN_POISON 0x1000000u +#define ___GFP_SKIP_KASAN_UNPOISON 0x1000000u +#define ___GFP_SKIP_KASAN_POISON 0x2000000u #else +#define ___GFP_SKIP_KASAN_UNPOISON 0 #define ___GFP_SKIP_KASAN_POISON 0 #endif #ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x2000000u +#define ___GFP_NOLOCKDEP 0x4000000u #else #define ___GFP_NOLOCKDEP 0 #endif @@ -241,22 +243,25 @@ struct vm_area_struct; * intended for optimization: setting memory tags at the same time as zeroing * memory has minimal additional performace impact. * - * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned - * on deallocation. Typically used for userspace pages. Currently only has an - * effect in HW tags mode. + * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation. + * Only effective in HW_TAGS mode. + * + * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation. + * Typically, used for userspace pages. Only effective in HW_TAGS mode. 
*/ #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) #define __GFP_COMP ((__force gfp_t)___GFP_COMP) #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) #define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS) -#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON) +#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON) +#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON) /* Disable lockdep for GFP context tracking */ #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (24 + \ - IS_ENABLED(CONFIG_KASAN_HW_TAGS) + \ +#define __GFP_BITS_SHIFT (24 + \ + 2 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) + \ IS_ENABLED(CONFIG_LOCKDEP)) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index cb4520374e2c..134c45e62d91 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -52,8 +52,9 @@ {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"} \ #ifdef CONFIG_KASAN_HW_TAGS -#define __def_gfpflag_names_kasan \ - , {(unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"} +#define __def_gfpflag_names_kasan , \ + {(unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}, \ + {(unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"} #else #define __def_gfpflag_names_kasan #endif diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 350bb27dc4bb..59a9408e1e7d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2343,6 +2343,26 @@ static inline bool check_new_pcp(struct page *page, unsigned int order) } #endif /* CONFIG_DEBUG_VM */ +static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags) +{ + /* Don't skip if a software KASAN mode is enabled. */ + if (IS_ENABLED(CONFIG_KASAN_GENERIC) || + IS_ENABLED(CONFIG_KASAN_SW_TAGS)) + return false; + + /* Skip, if hardware tag-based KASAN is not enabled. */ + if (!kasan_hw_tags_enabled()) + return true; + + /* + * With hardware tag-based KASAN enabled, skip if either: + * + * 1. Memory tags have already been cleared via tag_clear_highpage(). + * 2. Skipping has been requested via __GFP_SKIP_KASAN_UNPOISON. + */ + return init_tags || (flags & __GFP_SKIP_KASAN_UNPOISON); +} + inline void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags) { @@ -2382,15 +2402,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order, /* Note that memory is already initialized by the loop above. */ init = false; } - /* - * If either a software KASAN mode is enabled, or, - * in the case of hardware tag-based KASAN, - * if memory tags have not been cleared via tag_clear_highpage(). - */ - if (IS_ENABLED(CONFIG_KASAN_GENERIC) || - IS_ENABLED(CONFIG_KASAN_SW_TAGS) || - kasan_hw_tags_enabled() && !init_tags) { - /* Mark shadow memory or set memory tags. */ + if (!should_skip_kasan_unpoison(gfp_flags, init_tags)) { + /* Unpoison shadow memory or set memory tags. */ kasan_unpoison_pages(page, order, init); /* Note that memory is already initialized by KASAN. */ -- cgit v1.2.3-71-gd317 From 9353ffa6e9e90d2b6348209cf2b95a8ffee18711 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:11:29 -0700 Subject: kasan, page_alloc: allow skipping memory init for HW_TAGS Add a new GFP flag __GFP_SKIP_ZERO that allows to skip memory initialization. The flag is only effective with HW_TAGS KASAN. 
This flag will be used by vmalloc code for page_alloc allocations backing vmalloc() mappings in a following patch. The reason to skip memory initialization for these pages in page_alloc is because vmalloc code will be initializing them instead. With the current implementation, when __GFP_SKIP_ZERO is provided, __GFP_ZEROTAGS is ignored. This doesn't matter, as these two flags are never provided at the same time. However, if this is changed in the future, this particular implementation detail can be changed as well. Link: https://lkml.kernel.org/r/0d53efeff345de7d708e0baa0d8829167772521e.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 18 +++++++++++------- include/trace/events/mmflags.h | 1 + mm/page_alloc.c | 13 ++++++++++++- 3 files changed, 24 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 688a39fde43e..0fa17fb85de5 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -55,14 +55,16 @@ struct vm_area_struct; #define ___GFP_ACCOUNT 0x400000u #define ___GFP_ZEROTAGS 0x800000u #ifdef CONFIG_KASAN_HW_TAGS -#define ___GFP_SKIP_KASAN_UNPOISON 0x1000000u -#define ___GFP_SKIP_KASAN_POISON 0x2000000u +#define ___GFP_SKIP_ZERO 0x1000000u +#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u +#define ___GFP_SKIP_KASAN_POISON 0x4000000u #else +#define ___GFP_SKIP_ZERO 0 #define ___GFP_SKIP_KASAN_UNPOISON 0 #define ___GFP_SKIP_KASAN_POISON 0 #endif #ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x4000000u +#define ___GFP_NOLOCKDEP 0x8000000u #else #define ___GFP_NOLOCKDEP 0 #endif @@ -239,9 +241,10 @@ struct vm_area_struct; * %__GFP_ZERO returns a zeroed page on success. * * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself - * is being zeroed (either via __GFP_ZERO or via init_on_alloc). This flag is - * intended for optimization: setting memory tags at the same time as zeroing - * memory has minimal additional performace impact. + * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that + * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting + * memory tags at the same time as zeroing memory has minimal additional + * performace impact. * * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation. * Only effective in HW_TAGS mode. 
@@ -253,6 +256,7 @@ struct vm_area_struct; #define __GFP_COMP ((__force gfp_t)___GFP_COMP) #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) #define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS) +#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO) #define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON) #define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON) @@ -261,7 +265,7 @@ struct vm_area_struct; /* Room for N __GFP_FOO bits */ #define __GFP_BITS_SHIFT (24 + \ - 2 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) + \ + 3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) + \ IS_ENABLED(CONFIG_LOCKDEP)) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 134c45e62d91..6532119a6bf1 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -53,6 +53,7 @@ #ifdef CONFIG_KASAN_HW_TAGS #define __def_gfpflag_names_kasan , \ + {(unsigned long)__GFP_SKIP_ZERO, "__GFP_SKIP_ZERO"}, \ {(unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}, \ {(unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"} #else diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 59a9408e1e7d..bdc8f60ae462 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2363,10 +2363,21 @@ static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags) return init_tags || (flags & __GFP_SKIP_KASAN_UNPOISON); } +static inline bool should_skip_init(gfp_t flags) +{ + /* Don't skip, if hardware tag-based KASAN is not enabled. */ + if (!kasan_hw_tags_enabled()) + return false; + + /* For hardware tag-based KASAN, skip if requested. */ + return (flags & __GFP_SKIP_ZERO); +} + inline void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags) { - bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags); + bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && + !should_skip_init(gfp_flags); bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS); set_page_private(page, 0); -- cgit v1.2.3-71-gd317 From 23689e91fb22c15b84ac6c22ad9942039792f3af Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:11:32 -0700 Subject: kasan, vmalloc: add vmalloc tagging for HW_TAGS Add vmalloc tagging support to HW_TAGS KASAN. The key difference between HW_TAGS and the other two KASAN modes when it comes to vmalloc: HW_TAGS KASAN can only assign tags to physical memory. The other two modes have shadow memory covering every mapped virtual memory region. Make __kasan_unpoison_vmalloc() for HW_TAGS KASAN: - Skip non-VM_ALLOC mappings as HW_TAGS KASAN can only tag a single mapping of normal physical memory; see the comment in the function. - Generate a random tag, tag the returned pointer and the allocation, and initialize the allocation at the same time. - Propagate the tag into the page structs to allow accesses through page_address(vmalloc_to_page()). The rest of vmalloc-related KASAN hooks are not needed: - The shadow-related ones are fully skipped. - __kasan_poison_vmalloc() is kept as a no-op with a comment. Poisoning and zeroing of physical pages that are backing vmalloc() allocations are skipped via __GFP_SKIP_KASAN_UNPOISON and __GFP_SKIP_ZERO: __kasan_unpoison_vmalloc() does that instead. Enabling CONFIG_KASAN_VMALLOC with HW_TAGS is not yet allowed.
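Two of the mechanics described above lend themselves to a small userspace sketch: embedding a tag in the unused top byte of a pointer (what set_tag() does on arm64) and computing the in-page redzone between the granule-aligned end of the request and the end of the last page. The constants and the fixed example address below are assumptions for illustration, not kernel values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define GRANULE    16UL		/* stand-in for KASAN_GRANULE_SIZE */
#define PAGE_SIZE_ 4096UL

static uintptr_t round_up_(uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }

static void *set_tag(void *addr, uint8_t tag)
{
	/* Top-byte-ignore style tagging: the tag lives in bits 56..63. */
	return (void *)(((uintptr_t)addr & ~(0xffUL << 56)) | ((uintptr_t)tag << 56));
}

int main(void)
{
	void *p = (void *)0xffff800010000000UL;	/* pretend vmalloc() result */
	unsigned long size = 100;
	uint8_t tag = rand() & 0xff;		/* kasan_random_tag() stand-in */

	p = set_tag(p, tag);

	/* Redzone: from the granule-aligned end of the object to page end. */
	uintptr_t untagged = (uintptr_t)p & ~(0xffUL << 56);
	uintptr_t rz_start = round_up_(untagged + size, GRANULE);
	uintptr_t rz_size  = round_up_(rz_start, PAGE_SIZE_) - rz_start;

	printf("tagged ptr %p, redzone at %#lx size %lu\n",
	       p, (unsigned long)rz_start, (unsigned long)rz_size);
	return 0;
}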
Link: https://lkml.kernel.org/r/d19b2e9e59a9abc59d05b72dea8429dcaea739c6.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Co-developed-by: Vincenzo Frascino Signed-off-by: Vincenzo Frascino Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 36 +++++++++++++++++--- kernel/scs.c | 4 +-- mm/kasan/hw_tags.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++++++ mm/kasan/shadow.c | 10 +++++- mm/vmalloc.c | 51 ++++++++++++++++++++++------ 5 files changed, 175 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index d3efb6cbd0f5..65fc5285bb59 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -26,6 +26,12 @@ struct kunit_kasan_expectation { #endif +typedef unsigned int __bitwise kasan_vmalloc_flags_t; + +#define KASAN_VMALLOC_NONE 0x00u +#define KASAN_VMALLOC_INIT 0x01u +#define KASAN_VMALLOC_VM_ALLOC 0x02u + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) #include @@ -397,18 +403,39 @@ static inline void kasan_init_hw_tags(void) { } #ifdef CONFIG_KASAN_VMALLOC +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) + void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); int kasan_populate_vmalloc(unsigned long addr, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); -void *__kasan_unpoison_vmalloc(const void *start, unsigned long size); +#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + +static inline void kasan_populate_early_vm_area_shadow(void *start, + unsigned long size) +{ } +static inline int kasan_populate_vmalloc(unsigned long start, + unsigned long size) +{ + return 0; +} +static inline void kasan_release_vmalloc(unsigned long start, + unsigned long end, + unsigned long free_region_start, + unsigned long free_region_end) { } + +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + +void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, + kasan_vmalloc_flags_t flags); static __always_inline void *kasan_unpoison_vmalloc(const void *start, - unsigned long size) + unsigned long size, + kasan_vmalloc_flags_t flags) { if (kasan_enabled()) - return __kasan_unpoison_vmalloc(start, size); + return __kasan_unpoison_vmalloc(start, size, flags); return (void *)start; } @@ -435,7 +462,8 @@ static inline void kasan_release_vmalloc(unsigned long start, unsigned long free_region_end) { } static inline void *kasan_unpoison_vmalloc(const void *start, - unsigned long size) + unsigned long size, + kasan_vmalloc_flags_t flags) { return (void *)start; } diff --git a/kernel/scs.c b/kernel/scs.c index 579841be8864..b83bc9251f99 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -32,7 +32,7 @@ static void *__scs_alloc(int node) for (i = 0; i < NR_CACHED_SCS; i++) { s = this_cpu_xchg(scs_cache[i], NULL); if (s) { - kasan_unpoison_vmalloc(s, SCS_SIZE); + kasan_unpoison_vmalloc(s, SCS_SIZE, KASAN_VMALLOC_NONE); memset(s, 0, SCS_SIZE); return s; } @@ -78,7 +78,7 @@ void scs_free(void *s) if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL) return; - kasan_unpoison_vmalloc(s, SCS_SIZE); + kasan_unpoison_vmalloc(s, SCS_SIZE, KASAN_VMALLOC_NONE); vfree_atomic(s); } diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 
76cf2b6229c7..21104fd51872 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -192,6 +192,98 @@ void __init kasan_init_hw_tags(void) kasan_stack_collection_enabled() ? "on" : "off"); } +#ifdef CONFIG_KASAN_VMALLOC + +static void unpoison_vmalloc_pages(const void *addr, u8 tag) +{ + struct vm_struct *area; + int i; + + /* + * As hardware tag-based KASAN only tags VM_ALLOC vmalloc allocations + * (see the comment in __kasan_unpoison_vmalloc), all of the pages + * should belong to a single area. + */ + area = find_vm_area((void *)addr); + if (WARN_ON(!area)) + return; + + for (i = 0; i < area->nr_pages; i++) { + struct page *page = area->pages[i]; + + page_kasan_tag_set(page, tag); + } +} + +void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, + kasan_vmalloc_flags_t flags) +{ + u8 tag; + unsigned long redzone_start, redzone_size; + + if (!is_vmalloc_or_module_addr(start)) + return (void *)start; + + /* + * Skip unpoisoning and assigning a pointer tag for non-VM_ALLOC + * mappings as: + * + * 1. Unlike the software KASAN modes, hardware tag-based KASAN only + * supports tagging physical memory. Therefore, it can only tag a + * single mapping of normal physical pages. + * 2. Hardware tag-based KASAN can only tag memory mapped with special + * mapping protection bits, see arch_vmalloc_pgprot_modify(). + * As non-VM_ALLOC mappings can be mapped outside of vmalloc code, + * providing these bits would require tracking all non-VM_ALLOC + * mappers. + * + * Thus, for VM_ALLOC mappings, hardware tag-based KASAN only tags + * the first virtual mapping, which is created by vmalloc(). + * Tagging the page_alloc memory backing that vmalloc() allocation is + * skipped, see ___GFP_SKIP_KASAN_UNPOISON. + * + * For non-VM_ALLOC allocations, page_alloc memory is tagged as usual. + */ + if (!(flags & KASAN_VMALLOC_VM_ALLOC)) + return (void *)start; + + tag = kasan_random_tag(); + start = set_tag(start, tag); + + /* Unpoison and initialize memory up to size. */ + kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT); + + /* + * Explicitly poison and initialize the in-page vmalloc() redzone. + * Unlike software KASAN modes, hardware tag-based KASAN doesn't + * unpoison memory when populating shadow for vmalloc() space. + */ + redzone_start = round_up((unsigned long)start + size, + KASAN_GRANULE_SIZE); + redzone_size = round_up(redzone_start, PAGE_SIZE) - redzone_start; + kasan_poison((void *)redzone_start, redzone_size, KASAN_TAG_INVALID, + flags & KASAN_VMALLOC_INIT); + + /* + * Set per-page tag flags to allow accessing physical memory for the + * vmalloc() mapping through page_address(vmalloc_to_page()). + */ + unpoison_vmalloc_pages(start, tag); + + return (void *)start; +} + +void __kasan_poison_vmalloc(const void *start, unsigned long size) +{ + /* + * No tagging here. + * The physical pages backing the vmalloc() allocation are poisoned + * through the usual page_alloc paths. 
+ */ +} + +#endif + #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) void kasan_enable_tagging_sync(void) diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 5a866f6663fc..b958babc8fed 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -475,8 +475,16 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, } } -void *__kasan_unpoison_vmalloc(const void *start, unsigned long size) +void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, + kasan_vmalloc_flags_t flags) { + /* + * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC + * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored. + * Software KASAN modes can't optimize zeroing memory by combining it + * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored. + */ + if (!is_vmalloc_or_module_addr(start)) return (void *)start; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 8da8501db942..185ab3e27d13 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2237,8 +2237,12 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node) return NULL; } - /* Mark the pages as accessible, now that they are mapped. */ - mem = kasan_unpoison_vmalloc(mem, size); + /* + * Mark the pages as accessible, now that they are mapped. + * With hardware tag-based KASAN, marking is skipped for + * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). + */ + mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_NONE); return mem; } @@ -2472,9 +2476,12 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, * best-effort approach, as they can be mapped outside of vmalloc code. * For VM_ALLOC mappings, the pages are marked as accessible after * getting mapped in __vmalloc_node_range(). + * With hardware tag-based KASAN, marking is skipped for + * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ if (!(flags & VM_ALLOC)) - area->addr = kasan_unpoison_vmalloc(area->addr, requested_size); + area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, + KASAN_VMALLOC_NONE); return area; } @@ -3084,6 +3091,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, { struct vm_struct *area; void *ret; + kasan_vmalloc_flags_t kasan_flags; unsigned long real_size = size; unsigned long real_align = align; unsigned int shift = PAGE_SHIFT; @@ -3136,21 +3144,39 @@ again: goto fail; } - /* - * Modify protection bits to allow tagging. - * This must be done before mapping by __vmalloc_area_node(). - */ + /* Prepare arguments for __vmalloc_area_node(). */ if (kasan_hw_tags_enabled() && - pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) + pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { + /* + * Modify protection bits to allow tagging. + * This must be done before mapping in __vmalloc_area_node(). + */ prot = arch_vmap_pgprot_tagged(prot); + /* + * Skip page_alloc poisoning and zeroing for physical pages + * backing VM_ALLOC mapping. Memory is instead poisoned and + * zeroed by kasan_unpoison_vmalloc(). + */ + gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO; + } + /* Allocate physical pages and map them into vmalloc space. */ ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); if (!ret) goto fail; - /* Mark the pages as accessible, now that they are mapped. */ - area->addr = kasan_unpoison_vmalloc(area->addr, real_size); + /* + * Mark the pages as accessible, now that they are mapped. 
+ * The init condition should match the one in post_alloc_hook() + * (except for the should_skip_init() check) to make sure that memory + * is initialized under the same conditions regardless of the enabled + * KASAN mode. + */ + kasan_flags = KASAN_VMALLOC_VM_ALLOC; + if (!want_init_on_free() && want_init_on_alloc(gfp_mask)) + kasan_flags |= KASAN_VMALLOC_INIT; + area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); /* * In this function, newly allocated vm_struct has VM_UNINITIALIZED @@ -3850,10 +3876,13 @@ retry: /* * Mark allocated areas as accessible. Do it now as a best-effort * approach, as they can be mapped outside of vmalloc code. + * With hardware tag-based KASAN, marking is skipped for + * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ for (area = 0; area < nr_vms; area++) vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, - vms[area]->size); + vms[area]->size, + KASAN_VMALLOC_NONE); kfree(vas); return vms; -- cgit v1.2.3-71-gd317 From f6e39794f4b6da7ca9b77f2f9ad11fd6f0ac83e5 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:11:35 -0700 Subject: kasan, vmalloc: only tag normal vmalloc allocations The kernel can use to allocate executable memory. The only supported way to do that is via __vmalloc_node_range() with the executable bit set in the prot argument. (vmap() resets the bit via pgprot_nx()). Once tag-based KASAN modes start tagging vmalloc allocations, executing code from such allocations will lead to the PC register getting a tag, which is not tolerated by the kernel. Only tag the allocations for normal kernel pages. [andreyknvl@google.com: pass KASAN_VMALLOC_PROT_NORMAL to kasan_unpoison_vmalloc()] Link: https://lkml.kernel.org/r/9230ca3d3e40ffca041c133a524191fd71969a8d.1646233925.git.andreyknvl@google.com [andreyknvl@google.com: support tagged vmalloc mappings] Link: https://lkml.kernel.org/r/2f6605e3a358cf64d73a05710cb3da356886ad29.1646233925.git.andreyknvl@google.com [andreyknvl@google.com: don't unintentionally disabled poisoning] Link: https://lkml.kernel.org/r/de4587d6a719232e83c760113e46ed2d4d8da61e.1646757322.git.andreyknvl@google.com Link: https://lkml.kernel.org/r/fbfd9939a4dc375923c9a5c6b9e7ab05c26b8c6b.1643047180.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Mark Rutland Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 7 ++++--- kernel/scs.c | 12 ++++++++---- mm/kasan/hw_tags.c | 7 +++++++ mm/kasan/shadow.c | 8 ++++++++ mm/vmalloc.c | 49 +++++++++++++++++++++++++++++-------------------- 5 files changed, 56 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 65fc5285bb59..903b9453931d 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -28,9 +28,10 @@ struct kunit_kasan_expectation { typedef unsigned int __bitwise kasan_vmalloc_flags_t; -#define KASAN_VMALLOC_NONE 0x00u -#define KASAN_VMALLOC_INIT 0x01u -#define KASAN_VMALLOC_VM_ALLOC 0x02u +#define KASAN_VMALLOC_NONE 0x00u +#define KASAN_VMALLOC_INIT 0x01u +#define KASAN_VMALLOC_VM_ALLOC 0x02u +#define KASAN_VMALLOC_PROT_NORMAL 0x04u #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) diff --git a/kernel/scs.c b/kernel/scs.c index b83bc9251f99..b7e1b096d906 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -32,15 
+32,19 @@ static void *__scs_alloc(int node) for (i = 0; i < NR_CACHED_SCS; i++) { s = this_cpu_xchg(scs_cache[i], NULL); if (s) { - kasan_unpoison_vmalloc(s, SCS_SIZE, KASAN_VMALLOC_NONE); + s = kasan_unpoison_vmalloc(s, SCS_SIZE, + KASAN_VMALLOC_PROT_NORMAL); memset(s, 0, SCS_SIZE); - return s; + goto out; } } - return __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END, + s = __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END, GFP_SCS, PAGE_KERNEL, 0, node, __builtin_return_address(0)); + +out: + return kasan_reset_tag(s); } void *scs_alloc(int node) @@ -78,7 +82,7 @@ void scs_free(void *s) if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL) return; - kasan_unpoison_vmalloc(s, SCS_SIZE, KASAN_VMALLOC_NONE); + kasan_unpoison_vmalloc(s, SCS_SIZE, KASAN_VMALLOC_PROT_NORMAL); vfree_atomic(s); } diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 21104fd51872..2e9378a4f07f 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -247,6 +247,13 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, if (!(flags & KASAN_VMALLOC_VM_ALLOC)) return (void *)start; + /* + * Don't tag executable memory. + * The kernel doesn't tolerate having the PC register tagged. + */ + if (!(flags & KASAN_VMALLOC_PROT_NORMAL)) + return (void *)start; + tag = kasan_random_tag(); start = set_tag(start, tag); diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index b958babc8fed..a4f07de21771 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -488,6 +488,14 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, if (!is_vmalloc_or_module_addr(start)) return (void *)start; + /* + * Don't tag executable memory with the tag-based mode. + * The kernel doesn't tolerate having the PC register tagged. + */ + if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) && + !(flags & KASAN_VMALLOC_PROT_NORMAL)) + return (void *)start; + start = set_tag(start, kasan_random_tag()); kasan_unpoison(start, size, false); return (void *)start; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 185ab3e27d13..e163372d3967 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2242,7 +2242,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node) * With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ - mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_NONE); + mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); return mem; } @@ -2481,7 +2481,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, */ if (!(flags & VM_ALLOC)) area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, - KASAN_VMALLOC_NONE); + KASAN_VMALLOC_PROT_NORMAL); return area; } @@ -3091,7 +3091,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, { struct vm_struct *area; void *ret; - kasan_vmalloc_flags_t kasan_flags; + kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; unsigned long real_size = size; unsigned long real_align = align; unsigned int shift = PAGE_SHIFT; @@ -3144,21 +3144,28 @@ again: goto fail; } - /* Prepare arguments for __vmalloc_area_node(). */ - if (kasan_hw_tags_enabled() && - pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { - /* - * Modify protection bits to allow tagging. - * This must be done before mapping in __vmalloc_area_node(). - */ - prot = arch_vmap_pgprot_tagged(prot); + /* + * Prepare arguments for __vmalloc_area_node() and + * kasan_unpoison_vmalloc(). 
+ */ + if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { + if (kasan_hw_tags_enabled()) { + /* + * Modify protection bits to allow tagging. + * This must be done before mapping. + */ + prot = arch_vmap_pgprot_tagged(prot); - /* - * Skip page_alloc poisoning and zeroing for physical pages - * backing VM_ALLOC mapping. Memory is instead poisoned and - * zeroed by kasan_unpoison_vmalloc(). - */ - gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO; + /* + * Skip page_alloc poisoning and zeroing for physical + * pages backing VM_ALLOC mapping. Memory is instead + * poisoned and zeroed by kasan_unpoison_vmalloc(). + */ + gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO; + } + + /* Take note that the mapping is PAGE_KERNEL. */ + kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; } /* Allocate physical pages and map them into vmalloc space. */ @@ -3172,10 +3179,13 @@ again: * (except for the should_skip_init() check) to make sure that memory * is initialized under the same conditions regardless of the enabled * KASAN mode. + * Tag-based KASAN modes only assign tags to normal non-executable + * allocations, see __kasan_unpoison_vmalloc(). */ - kasan_flags = KASAN_VMALLOC_VM_ALLOC; + kasan_flags |= KASAN_VMALLOC_VM_ALLOC; if (!want_init_on_free() && want_init_on_alloc(gfp_mask)) kasan_flags |= KASAN_VMALLOC_INIT; + /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); /* @@ -3881,8 +3891,7 @@ retry: */ for (area = 0; area < nr_vms; area++) vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, - vms[area]->size, - KASAN_VMALLOC_NONE); + vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); kfree(vas); return vms; -- cgit v1.2.3-71-gd317 From ed6d74446cbfb88c747d4b32477a9d46132694db Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:12:02 -0700 Subject: kasan: test: support async (again) and asymm modes for HW_TAGS Async mode support has already been implemented in commit e80a76aa1a91 ("kasan, arm64: tests supports for HW_TAGS async mode") but then got accidentally broken in commit 99734b535d9b ("kasan: detect false-positives in tests"). Restore the changes removed by the latter patch and adapt them for asymm mode: add a sync_fault flag to kunit_kasan_expectation that only gets set if the MTE fault was synchronous, and reenable MTE on such faults in tests. Also rename kunit_kasan_expectation to kunit_kasan_status and move its definition to mm/kasan/kasan.h from include/linux/kasan.h, as this structure is only internally used by KASAN. Also put the structure definition under IS_ENABLED(CONFIG_KUNIT).
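The handshake between the report path and the test harness that this patch restores can be modelled with a few lines of standalone C; kasan_status, report_fault() and EXPECT_KASAN_FAIL() below are local stand-ins for the kernel's kunit_kasan_status, kasan_update_kunit_status() and KUNIT_EXPECT_KASAN_FAIL(), not the real API:

#include <stdbool.h>
#include <stdio.h>

struct kasan_status {
	bool report_found;
	bool sync_fault;
};

static struct kasan_status test_status;

/* What the report path records, reduced to its essence. */
static void report_fault(bool sync)
{
	test_status.report_found = true;
	test_status.sync_fault = sync;
}

#define EXPECT_KASAN_FAIL(expr) do {					\
	test_status.report_found = false;				\
	expr;								\
	if (!test_status.report_found)					\
		puts("FAIL: no report for \"" #expr "\"");		\
	else if (test_status.sync_fault)				\
		puts("sync fault: would re-enable tag checking");	\
	test_status.report_found = false;				\
} while (0)

int main(void)
{
	EXPECT_KASAN_FAIL(report_fault(true));	/* simulated sync MTE fault */
	EXPECT_KASAN_FAIL((void)0);		/* no report -> test failure */
	return 0;
}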
Link: https://lkml.kernel.org/r/133970562ccacc93ba19d754012c562351d4a8c8.1645033139.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Andrey Ryabinin Cc: Vincenzo Frascino Cc: Catalin Marinas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 5 ----- lib/test_kasan.c | 39 ++++++++++++++++++++++----------------- mm/kasan/hw_tags.c | 18 +++++++++--------- mm/kasan/kasan.h | 14 ++++++++++++-- mm/kasan/report.c | 17 +++++++++-------- 5 files changed, 52 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 903b9453931d..fe36215807f7 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -19,11 +19,6 @@ struct task_struct; #include #include -/* kasan_data struct is used in KUnit tests for KASAN expected failures */ -struct kunit_kasan_expectation { - bool report_found; -}; - #endif typedef unsigned int __bitwise kasan_vmalloc_flags_t; diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 2addd0c66acf..72391f992689 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -37,7 +37,7 @@ void *kasan_ptr_result; int kasan_int_result; static struct kunit_resource resource; -static struct kunit_kasan_expectation fail_data; +static struct kunit_kasan_status test_status; static bool multishot; /* @@ -54,58 +54,63 @@ static int kasan_test_init(struct kunit *test) } multishot = kasan_save_enable_multi_shot(); - fail_data.report_found = false; + test_status.report_found = false; + test_status.sync_fault = false; kunit_add_named_resource(test, NULL, NULL, &resource, - "kasan_data", &fail_data); + "kasan_status", &test_status); return 0; } static void kasan_test_exit(struct kunit *test) { kasan_restore_multi_shot(multishot); - KUNIT_EXPECT_FALSE(test, fail_data.report_found); + KUNIT_EXPECT_FALSE(test, test_status.report_found); } /** * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a * KASAN report; causes a test failure otherwise. This relies on a KUnit - * resource named "kasan_data". Do not use this name for KUnit resources + * resource named "kasan_status". Do not use this name for KUnit resources * outside of KASAN tests. * - * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag + * For hardware tag-based KASAN, when a synchronous tag fault happens, tag * checking is auto-disabled. When this happens, this test handler reenables * tag checking. As tag checking can be only disabled or enabled per CPU, * this handler disables migration (preemption). * - * Since the compiler doesn't see that the expression can change the fail_data + * Since the compiler doesn't see that the expression can change the test_status * fields, it can reorder or optimize away the accesses to those fields. * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the * expression to prevent that. * - * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as - * false. This allows detecting KASAN reports that happen outside of the checks - * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL - * and in kasan_test_exit. + * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept + * as false. This allows detecting KASAN reports that happen outside of the + * checks by asserting !test_status.report_found at the start of + * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit. 
*/ #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ kasan_sync_fault_possible()) \ migrate_disable(); \ - KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \ + KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \ barrier(); \ expression; \ barrier(); \ - if (!READ_ONCE(fail_data.report_found)) { \ + if (kasan_async_fault_possible()) \ + kasan_force_async_fault(); \ + if (!READ_ONCE(test_status.report_found)) { \ KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \ "expected in \"" #expression \ "\", but none occurred"); \ } \ - if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \ - if (READ_ONCE(fail_data.report_found)) \ - kasan_enable_tagging_sync(); \ + if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ + kasan_sync_fault_possible()) { \ + if (READ_ONCE(test_status.report_found) && \ + READ_ONCE(test_status.sync_fault)) \ + kasan_enable_tagging(); \ migrate_enable(); \ } \ - WRITE_ONCE(fail_data.report_found, false); \ + WRITE_ONCE(test_status.report_found, false); \ } while (0) #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \ diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index fad1887e54c0..07a76c46daa5 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -172,12 +172,7 @@ void kasan_init_hw_tags_cpu(void) * Enable async or asymm modes only when explicitly requested * through the command line. */ - if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC) - hw_enable_tagging_async(); - else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM) - hw_enable_tagging_asymm(); - else - hw_enable_tagging_sync(); + kasan_enable_tagging(); } /* kasan_init_hw_tags() is called once on boot CPU. */ @@ -343,11 +338,16 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size) #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) -void kasan_enable_tagging_sync(void) +void kasan_enable_tagging(void) { - hw_enable_tagging_sync(); + if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC) + hw_enable_tagging_async(); + else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM) + hw_enable_tagging_asymm(); + else + hw_enable_tagging_sync(); } -EXPORT_SYMBOL_GPL(kasan_enable_tagging_sync); +EXPORT_SYMBOL_GPL(kasan_enable_tagging); void kasan_force_async_fault(void) { diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 4d67408e8407..d1e111b7d5d8 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -7,6 +7,16 @@ #include #include +#if IS_ENABLED(CONFIG_KUNIT) + +/* Used in KUnit-compatible KASAN tests. 
*/ +struct kunit_kasan_status { + bool report_found; + bool sync_fault; +}; + +#endif + #ifdef CONFIG_KASAN_HW_TAGS #include @@ -350,12 +360,12 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag) #if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) -void kasan_enable_tagging_sync(void); +void kasan_enable_tagging(void); void kasan_force_async_fault(void); #else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */ -static inline void kasan_enable_tagging_sync(void) { } +static inline void kasan_enable_tagging(void) { } static inline void kasan_force_async_fault(void) { } #endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */ diff --git a/mm/kasan/report.c b/mm/kasan/report.c index f14146563d41..137c2c0b09db 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -336,20 +336,21 @@ static bool report_enabled(void) } #if IS_ENABLED(CONFIG_KUNIT) -static void kasan_update_kunit_status(struct kunit *cur_test) +static void kasan_update_kunit_status(struct kunit *cur_test, bool sync) { struct kunit_resource *resource; - struct kunit_kasan_expectation *kasan_data; + struct kunit_kasan_status *status; - resource = kunit_find_named_resource(cur_test, "kasan_data"); + resource = kunit_find_named_resource(cur_test, "kasan_status"); if (!resource) { kunit_set_failure(cur_test); return; } - kasan_data = (struct kunit_kasan_expectation *)resource->data; - WRITE_ONCE(kasan_data->report_found, true); + status = (struct kunit_kasan_status *)resource->data; + WRITE_ONCE(status->report_found, true); + WRITE_ONCE(status->sync_fault, sync); kunit_put_resource(resource); } #endif /* IS_ENABLED(CONFIG_KUNIT) */ @@ -363,7 +364,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip) #if IS_ENABLED(CONFIG_KUNIT) if (current->kunit_test) - kasan_update_kunit_status(current->kunit_test); + kasan_update_kunit_status(current->kunit_test, true); #endif /* IS_ENABLED(CONFIG_KUNIT) */ start_report(&flags); @@ -383,7 +384,7 @@ void kasan_report_async(void) #if IS_ENABLED(CONFIG_KUNIT) if (current->kunit_test) - kasan_update_kunit_status(current->kunit_test); + kasan_update_kunit_status(current->kunit_test, false); #endif /* IS_ENABLED(CONFIG_KUNIT) */ start_report(&flags); @@ -405,7 +406,7 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write, #if IS_ENABLED(CONFIG_KUNIT) if (current->kunit_test) - kasan_update_kunit_status(current->kunit_test); + kasan_update_kunit_status(current->kunit_test, true); #endif /* IS_ENABLED(CONFIG_KUNIT) */ disable_trace_on_warning(); -- cgit v1.2.3-71-gd317 From 80207910cd71b4e0e87140d165d82b5d3ff69e53 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 24 Mar 2022 18:13:12 -0700 Subject: kasan: move and hide kasan_save_enable/restore_multi_shot - Move kasan_save_enable/restore_multi_shot() declarations to mm/kasan/kasan.h, as there is no need for them to be visible outside of KASAN implementation. - Only define and export these functions when KASAN tests are enabled. - Move their definitions closer to other test-related code in report.c. 
Link: https://lkml.kernel.org/r/6ba637333b78447f027d775f2d55ab1a40f63c99.1646237226.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 4 ---- mm/kasan/kasan.h | 7 +++++++ mm/kasan/report.c | 30 +++++++++++++++++------------- 3 files changed, 24 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index fe36215807f7..ceebcb9de7bf 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -267,10 +267,6 @@ static __always_inline bool kasan_check_byte(const void *addr) return true; } - -bool kasan_save_enable_multi_shot(void); -void kasan_restore_multi_shot(bool enabled); - #else /* CONFIG_KASAN */ static inline slab_flags_t kasan_never_merge(void) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 9d2e128eb623..d79b83d673b1 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -492,6 +492,13 @@ static inline bool kasan_arch_is_ready(void) { return true; } #error kasan_arch_is_ready only works in KASAN generic outline mode! #endif +#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST) + +bool kasan_save_enable_multi_shot(void); +void kasan_restore_multi_shot(bool enabled); + +#endif + /* * Exported functions for interfaces called from assembly or from generated * code. Declarations here to avoid warning about missing declarations. diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 7ef3b0455603..c9bfffe931b4 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -64,19 +64,6 @@ static int __init early_kasan_fault(char *arg) } early_param("kasan.fault", early_kasan_fault); -bool kasan_save_enable_multi_shot(void) -{ - return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); -} -EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot); - -void kasan_restore_multi_shot(bool enabled) -{ - if (!enabled) - clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); -} -EXPORT_SYMBOL_GPL(kasan_restore_multi_shot); - static int __init kasan_set_multi_shot(char *str) { set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); @@ -109,6 +96,23 @@ static bool report_enabled(void) return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags); } +#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST) + +bool kasan_save_enable_multi_shot(void) +{ + return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); +} +EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot); + +void kasan_restore_multi_shot(bool enabled) +{ + if (!enabled) + clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); +} +EXPORT_SYMBOL_GPL(kasan_restore_multi_shot); + +#endif + #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) static void update_kunit_status(bool sync) { -- cgit v1.2.3-71-gd317 From 562beb7235abfebdd8366e0664a5c3d1e597b990 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Thu, 24 Mar 2022 18:13:27 -0700 Subject: mm/huge_memory: make is_transparent_hugepage() static It's only used inside the huge_memory.c now. Don't export it and make it static. We can thus reduce the size of huge_memory.o a bit. 
Without this patch:
   text    data     bss     dec     hex filename
  32319    2965       4   35288    89d8 mm/huge_memory.o

With this patch:
   text    data     bss     dec     hex filename
  32042    2957       4   35003    88bb mm/huge_memory.o

Link: https://lkml.kernel.org/r/20220302082145.12028-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Muchun Song Reviewed-by: Yang Shi Cc: Matthew Wilcox Cc: William Kucharski Cc: Hugh Dickins Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 6 ------ mm/huge_memory.c | 3 +-- 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 0734aff8fa19..2999190adc22 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -183,7 +183,6 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, void prep_transhuge_page(struct page *page); void free_transhuge_page(struct page *page); -bool is_transparent_hugepage(struct page *page); bool can_split_folio(struct folio *folio, int *pextra_pins); int split_huge_page_to_list(struct page *page, struct list_head *list); @@ -341,11 +340,6 @@ static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, static inline void prep_transhuge_page(struct page *page) {} -static inline bool is_transparent_hugepage(struct page *page) -{ - return false; -} - #define transparent_hugepage_flags 0UL #define thp_get_unmapped_area NULL diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 035aa204ab8d..057b13bde82d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -533,7 +533,7 @@ void prep_transhuge_page(struct page *page) set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); } -bool is_transparent_hugepage(struct page *page) +static inline bool is_transparent_hugepage(struct page *page) { if (!PageCompound(page)) return false; @@ -542,7 +542,6 @@ bool is_transparent_hugepage(struct page *page) return is_huge_zero_page(page) || page[1].compound_dtor == TRANSHUGE_PAGE_DTOR; } -EXPORT_SYMBOL_GPL(is_transparent_hugepage); static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, -- cgit v1.2.3-71-gd317 From 03104c2c5db8918030788e607e4af980b2f42bb3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 24 Mar 2022 18:13:50 -0700 Subject: mm/swapfile: remove stale reuse_swap_page() All users are gone, let's remove it. We'll let SWP_STABLE_WRITES stick around for now, as it might come in handy in the near future. Link: https://lkml.kernel.org/r/20220131162940.210846-8-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Vlastimil Babka Cc: Andrea Arcangeli Cc: Christoph Hellwig Cc: David Rientjes Cc: Don Dutile Cc: Hugh Dickins Cc: Jan Kara Cc: Jann Horn Cc: Jason Gunthorpe Cc: John Hubbard Cc: Kirill A.
Shutemov Cc: Liang Zhang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Mike Kravetz Cc: Mike Rapoport Cc: Nadav Amit Cc: Oleg Nesterov Cc: Peter Xu Cc: Rik van Riel Cc: Roman Gushchin Cc: Shakeel Butt Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 4 -- mm/swapfile.c | 104 --------------------------------------------------- 2 files changed, 108 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index f37837c614c5..27093b477c5f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -515,7 +515,6 @@ extern int __swp_swapcount(swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); extern struct swap_info_struct *swp_swap_info(swp_entry_t entry); -extern bool reuse_swap_page(struct page *); extern int try_to_free_swap(struct page *); struct backing_dev_info; extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); @@ -681,9 +680,6 @@ static inline int swp_swapcount(swp_entry_t entry) return 0; } -#define reuse_swap_page(page) \ - (page_trans_huge_mapcount(page) == 1) - static inline int try_to_free_swap(struct page *page) { return 0; diff --git a/mm/swapfile.c b/mm/swapfile.c index 33c7abb16610..63c61f8b2611 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1167,16 +1167,6 @@ out: return NULL; } -static struct swap_info_struct *swap_info_get(swp_entry_t entry) -{ - struct swap_info_struct *p; - - p = _swap_info_get(entry); - if (p) - spin_lock(&p->lock); - return p; -} - static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry, struct swap_info_struct *q) { @@ -1601,100 +1591,6 @@ static bool page_swapped(struct page *page) return false; } -static int page_trans_huge_map_swapcount(struct page *page, - int *total_swapcount) -{ - int i, map_swapcount, _total_swapcount; - unsigned long offset = 0; - struct swap_info_struct *si; - struct swap_cluster_info *ci = NULL; - unsigned char *map = NULL; - int swapcount = 0; - - /* hugetlbfs shouldn't call it */ - VM_BUG_ON_PAGE(PageHuge(page), page); - - if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) { - if (PageSwapCache(page)) - swapcount = page_swapcount(page); - if (total_swapcount) - *total_swapcount = swapcount; - return swapcount + page_trans_huge_mapcount(page); - } - - page = compound_head(page); - - _total_swapcount = map_swapcount = 0; - if (PageSwapCache(page)) { - swp_entry_t entry; - - entry.val = page_private(page); - si = _swap_info_get(entry); - if (si) { - map = si->swap_map; - offset = swp_offset(entry); - } - } - if (map) - ci = lock_cluster(si, offset); - for (i = 0; i < HPAGE_PMD_NR; i++) { - int mapcount = atomic_read(&page[i]._mapcount) + 1; - if (map) { - swapcount = swap_count(map[offset + i]); - _total_swapcount += swapcount; - } - map_swapcount = max(map_swapcount, mapcount + swapcount); - } - unlock_cluster(ci); - - if (PageDoubleMap(page)) - map_swapcount -= 1; - - if (total_swapcount) - *total_swapcount = _total_swapcount; - - return map_swapcount + compound_mapcount(page); -} - -/* - * We can write to an anon page without COW if there are no other references - * to it. And as a side-effect, free up its swap: because the old content - * on disk will never be read, and seeking back there to write new content - * later would only waste time away from clustering. 
- */ -bool reuse_swap_page(struct page *page) -{ - int count, total_swapcount; - - VM_BUG_ON_PAGE(!PageLocked(page), page); - if (unlikely(PageKsm(page))) - return false; - count = page_trans_huge_map_swapcount(page, &total_swapcount); - if (count == 1 && PageSwapCache(page) && - (likely(!PageTransCompound(page)) || - /* The remaining swap count will be freed soon */ - total_swapcount == page_swapcount(page))) { - if (!PageWriteback(page)) { - page = compound_head(page); - delete_from_swap_cache(page); - SetPageDirty(page); - } else { - swp_entry_t entry; - struct swap_info_struct *p; - - entry.val = page_private(page); - p = swap_info_get(entry); - if (p->flags & SWP_STABLE_WRITES) { - spin_unlock(&p->lock); - return false; - } - spin_unlock(&p->lock); - } - } - - return count <= 1; -} - /* * If swap is getting full, or if there are no more mappings of this page, * then try_to_free_swap is called to free its swap space. -- cgit v1.2.3-71-gd317 From 55c62fa7c53368a9011cd1276c001a1469078c6a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 24 Mar 2022 18:13:53 -0700 Subject: mm/huge_memory: remove stale page_trans_huge_mapcount() All users are gone, let's remove it. Link: https://lkml.kernel.org/r/20220131162940.210846-9-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Vlastimil Babka Cc: Andrea Arcangeli Cc: Christoph Hellwig Cc: David Rientjes Cc: Don Dutile Cc: Hugh Dickins Cc: Jan Kara Cc: Jann Horn Cc: Jason Gunthorpe Cc: John Hubbard Cc: Kirill A. Shutemov Cc: Liang Zhang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Mike Kravetz Cc: Mike Rapoport Cc: Nadav Amit Cc: Oleg Nesterov Cc: Peter Xu Cc: Rik van Riel Cc: Roman Gushchin Cc: Shakeel Butt Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 5 ----- mm/huge_memory.c | 48 ------------------------------------------------ 2 files changed, 53 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 7a3dd7e617e4..e34edb775334 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -834,16 +834,11 @@ static inline int total_mapcount(struct page *page) return folio_mapcount(page_folio(page)); } -int page_trans_huge_mapcount(struct page *page); #else static inline int total_mapcount(struct page *page) { return page_mapcount(page); } -static inline int page_trans_huge_mapcount(struct page *page) -{ - return page_mapcount(page); -} #endif static inline struct page *virt_to_head_page(const void *x) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a5f651c6df4e..cc580d90117b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2483,54 +2483,6 @@ static void __split_huge_page(struct page *page, struct list_head *list, } } -/* - * This calculates accurately how many mappings a transparent hugepage - * has (unlike page_mapcount() which isn't fully accurate). This full - * accuracy is primarily needed to know if copy-on-write faults can - * reuse the page and change the mapping to read-write instead of - * copying them. At the same time this returns the total_mapcount too. - * - * The function returns the highest mapcount any one of the subpages - * has. If the return value is one, even if different processes are - * mapping different subpages of the transparent hugepage, they can - * all reuse it, because each process is reusing a different subpage. - * - * The total_mapcount is instead counting all virtual mappings of the - * subpages. 
If the total_mapcount is equal to "one", it tells the - * caller all mappings belong to the same "mm" and in turn the - * anon_vma of the transparent hugepage can become the vma->anon_vma - * local one as no other process may be mapping any of the subpages. - * - * It would be more accurate to replace page_mapcount() with - * page_trans_huge_mapcount(), however we only use - * page_trans_huge_mapcount() in the copy-on-write faults where we - * need full accuracy to avoid breaking page pinning, because - * page_trans_huge_mapcount() is slower than page_mapcount(). - */ -int page_trans_huge_mapcount(struct page *page) -{ - int i, ret; - - /* hugetlbfs shouldn't call it */ - VM_BUG_ON_PAGE(PageHuge(page), page); - - if (likely(!PageTransCompound(page))) - return atomic_read(&page->_mapcount) + 1; - - page = compound_head(page); - - ret = 0; - for (i = 0; i < thp_nr_pages(page); i++) { - int mapcount = atomic_read(&page[i]._mapcount) + 1; - ret = max(ret, mapcount); - } - - if (PageDoubleMap(page)) - ret -= 1; - - return ret + compound_mapcount(page); -} - /* Racy check whether the huge page can be split */ bool can_split_folio(struct folio *folio, int *pextra_pins) { -- cgit v1.2.3-71-gd317 From 566d3362885aab04d6b0f885f12db3176ca3a032 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 24 Mar 2022 18:13:59 -0700 Subject: mm: warn on deleting redirtied only if accounted filemap_unaccount_folio() has a WARN_ON_ONCE(folio_test_dirty(folio)). It is good to warn of late dirtying on a persistent filesystem, but late dirtying on tmpfs can only lose data which is expected to be thrown away; and it's a pity if that warning comes ONCE on tmpfs, then hides others which really matter. Make it conditional on mapping_can_writeback(). Cleanup: then folio_account_cleaned() no longer needs to check that for itself, and so no longer needs to know the mapping. Link: https://lkml.kernel.org/r/b5a1106c-7226-a5c6-ad41-ad4832cae1f@google.com Signed-off-by: Hugh Dickins Cc: Matthew Wilcox Cc: Jan Kara Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagemap.h | 3 +-- mm/filemap.c | 14 +++++++++----- mm/page-writeback.c | 18 ++++++++---------- 3 files changed, 18 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 58f395f3febe..a8d0b327b066 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1009,8 +1009,7 @@ static inline void __set_page_dirty(struct page *page, { __folio_mark_dirty(page_folio(page), mapping, warn); } -void folio_account_cleaned(struct folio *folio, struct address_space *mapping, - struct bdi_writeback *wb); +void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb); void __folio_cancel_dirty(struct folio *folio); static inline void folio_cancel_dirty(struct folio *folio) { diff --git a/mm/filemap.c b/mm/filemap.c index c5c169c16bae..8426434042f4 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -193,16 +193,20 @@ static void filemap_unaccount_folio(struct address_space *mapping, /* * At this point folio must be either written or cleaned by * truncate. Dirty folio here signals a bug and loss of - * unwritten data. + * unwritten data - on ordinary filesystems. * - * This fixes dirty accounting after removing the folio entirely + * But it's harmless on in-memory filesystems like tmpfs; and can + * occur when a driver which did get_user_pages() sets page dirty + * before putting it, while the inode is being finally evicted.
+ * + * Below fixes dirty accounting after removing the folio entirely * but leaves the dirty flag set: it has no effect for truncated * folio and anyway will be cleared before returning folio to * buddy allocator. */ - if (WARN_ON_ONCE(folio_test_dirty(folio))) - folio_account_cleaned(folio, mapping, - inode_to_wb(mapping->host)); + if (WARN_ON_ONCE(folio_test_dirty(folio) && + mapping_can_writeback(mapping))) + folio_account_cleaned(folio, inode_to_wb(mapping->host)); } /* diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 435c02630593..7e2da284e427 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2465,16 +2465,14 @@ static void folio_account_dirtied(struct folio *folio, * * Caller must hold lock_page_memcg(). */ -void folio_account_cleaned(struct folio *folio, struct address_space *mapping, - struct bdi_writeback *wb) +void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) { - if (mapping_can_writeback(mapping)) { - long nr = folio_nr_pages(folio); - lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr); - zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); - wb_stat_mod(wb, WB_RECLAIMABLE, -nr); - task_io_account_cancelled_write(nr * PAGE_SIZE); - } + long nr = folio_nr_pages(folio); + + lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr); + zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); + wb_stat_mod(wb, WB_RECLAIMABLE, -nr); + task_io_account_cancelled_write(nr * PAGE_SIZE); } /* @@ -2683,7 +2681,7 @@ void __folio_cancel_dirty(struct folio *folio) wb = unlocked_inode_to_wb_begin(inode, &cookie); if (folio_test_clear_dirty(folio)) - folio_account_cleaned(folio, mapping, wb); + folio_account_cleaned(folio, wb); unlocked_inode_to_wb_end(inode, &cookie); folio_memcg_unlock(folio); -- cgit v1.2.3-71-gd317 From caaf2ae712b7cc3c7717898fe267dbf882a502ef Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 24 Jan 2022 14:03:24 +0100 Subject: dma-buf: Add dma_fence_array_for_each (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a helper to iterate over all fences in a dma_fence_array object. v2 (Jason Ekstrand) - Return NULL from dma_fence_array_first if head == NULL. This matches the iterator behavior of dma_fence_chain_for_each in that it iterates zero times if head == NULL. - Return NULL from dma_fence_array_next if index > array->num_fences. 
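To illustrate the new iterator (a sketch, not part of the patch; all_signaled() is a made-up caller):

static bool all_signaled(struct dma_fence *head)
{
	struct dma_fence *fence;
	unsigned int index;

	dma_fence_array_for_each(fence, index, head) {
		if (!dma_fence_is_signaled(fence))
			return false;
	}

	return true;
}

If head is not a dma_fence_array, the loop simply visits head itself once.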
Signed-off-by: Jason Ekstrand Reviewed-by: Jason Ekstrand Reviewed-by: Christian König Cc: Daniel Vetter Cc: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20210610210925.642582-2-jason@jlekstrand.net Signed-off-by: Christian König --- drivers/dma-buf/dma-fence-array.c | 27 +++++++++++++++++++++++++++ include/linux/dma-fence-array.h | 17 +++++++++++++++++ 2 files changed, 44 insertions(+) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index cb1bacb5a42b..52b85d292383 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -219,3 +219,30 @@ bool dma_fence_match_context(struct dma_fence *fence, u64 context) return true; } EXPORT_SYMBOL(dma_fence_match_context); + +struct dma_fence *dma_fence_array_first(struct dma_fence *head) +{ + struct dma_fence_array *array; + + if (!head) + return NULL; + + array = to_dma_fence_array(head); + if (!array) + return head; + + return array->fences[0]; +} +EXPORT_SYMBOL(dma_fence_array_first); + +struct dma_fence *dma_fence_array_next(struct dma_fence *head, + unsigned int index) +{ + struct dma_fence_array *array = to_dma_fence_array(head); + + if (!array || index >= array->num_fences) + return NULL; + + return array->fences[index]; +} +EXPORT_SYMBOL(dma_fence_array_next); diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index fec374f69e12..e34dcb0bb462 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -61,6 +61,19 @@ to_dma_fence_array(struct dma_fence *fence) return container_of(fence, struct dma_fence_array, base); } +/** + * dma_fence_array_for_each - iterate over all fences in array + * @fence: current fence + * @index: index into the array + * @head: potential dma_fence_array object + * + * Test if @array is a dma_fence_array object and if yes iterate over all fences + * in the array. If not just iterate over the fence in @array itself. + */ +#define dma_fence_array_for_each(fence, index, head) \ + for (index = 0, fence = dma_fence_array_first(head); fence; \ + ++(index), fence = dma_fence_array_next(head, index)) + struct dma_fence_array *dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context, unsigned seqno, @@ -68,4 +81,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences, bool dma_fence_match_context(struct dma_fence *fence, u64 context); +struct dma_fence *dma_fence_array_first(struct dma_fence *head); +struct dma_fence *dma_fence_array_next(struct dma_fence *head, + unsigned int index); + #endif /* __LINUX_DMA_FENCE_ARRAY_H */ -- cgit v1.2.3-71-gd317 From 64a8f92fd783e750cdb81af75942dcd53bbf61bd Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 11 Mar 2022 10:27:53 +0100 Subject: dma-buf: add dma_fence_unwrap v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a general purpose helper to deep dive into dma_fence_chain/dma_fence_array structures and iterate over all the fences in them. This is useful when we need to flatten out all fences in those structures. 
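For example (a sketch under the semantics described above; count_fences() is hypothetical):

static unsigned int count_fences(struct dma_fence *head)
{
	struct dma_fence_unwrap iter;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_fence_unwrap_for_each(fence, &iter, head)
		count++;

	return count;
}

A plain fence is visited exactly once; nested chain/array containers are walked down to their leaf fences.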
v2: some selftests cleanup, improved function naming and documentation Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220311110244.1245-1-christian.koenig@amd.com --- Documentation/driver-api/dma-buf.rst | 6 + drivers/dma-buf/Makefile | 1 + drivers/dma-buf/selftests.h | 1 + drivers/dma-buf/st-dma-fence-unwrap.c | 261 ++++++++++++++++++++++++++++++++++ include/linux/dma-fence-array.h | 2 + include/linux/dma-fence-chain.h | 2 + include/linux/dma-fence-unwrap.h | 95 +++++++++++++ 7 files changed, 368 insertions(+) create mode 100644 drivers/dma-buf/st-dma-fence-unwrap.c create mode 100644 include/linux/dma-fence-unwrap.h (limited to 'include/linux') diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index 55006678394a..36a76cbe9095 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -185,6 +185,12 @@ DMA Fence Chain .. kernel-doc:: include/linux/dma-fence-chain.h :internal: +DMA Fence unwrap +~~~~~~~~~~~~~~~~ + +.. kernel-doc:: include/linux/dma-fence-unwrap.h + :internal: + DMA Fence uABI/Sync File ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 511805dbeb75..4c9eb53ba3f8 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -12,6 +12,7 @@ dmabuf_selftests-y := \ selftest.o \ st-dma-fence.o \ st-dma-fence-chain.o \ + st-dma-fence-unwrap.o \ st-dma-resv.o obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h index 97d73aaa31da..851965867d9c 100644 --- a/drivers/dma-buf/selftests.h +++ b/drivers/dma-buf/selftests.h @@ -12,4 +12,5 @@ selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */ selftest(dma_fence, dma_fence) selftest(dma_fence_chain, dma_fence_chain) +selftest(dma_fence_unwrap, dma_fence_unwrap) selftest(dma_resv, dma_resv) diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c new file mode 100644 index 000000000000..039f016b57be --- /dev/null +++ b/drivers/dma-buf/st-dma-fence-unwrap.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: MIT + +/* + * Copyright (C) 2022 Advanced Micro Devices, Inc. + */ + +#include +#if 0 +#include +#include +#include +#include +#include +#include +#include +#endif + +#include "selftest.h" + +#define CHAIN_SZ (4 << 10) + +static inline struct mock_fence { + struct dma_fence base; + spinlock_t lock; +} *to_mock_fence(struct dma_fence *f) { + return container_of(f, struct mock_fence, base); +} + +static const char *mock_name(struct dma_fence *f) +{ + return "mock"; +} + +static const struct dma_fence_ops mock_ops = { + .get_driver_name = mock_name, + .get_timeline_name = mock_name, +}; + +static struct dma_fence *mock_fence(void) +{ + struct mock_fence *f; + + f = kmalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + spin_lock_init(&f->lock); + dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0); + + return &f->base; +} + +static struct dma_fence *mock_array(unsigned int num_fences, ...) 
+{ + struct dma_fence_array *array; + struct dma_fence **fences; + va_list valist; + int i; + + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) + return NULL; + + va_start(valist, num_fences); + for (i = 0; i < num_fences; ++i) + fences[i] = va_arg(valist, typeof(*fences)); + va_end(valist); + + array = dma_fence_array_create(num_fences, fences, + dma_fence_context_alloc(1), + 1, false); + if (!array) + goto cleanup; + return &array->base; + +cleanup: + for (i = 0; i < num_fences; ++i) + dma_fence_put(fences[i]); + kfree(fences); + return NULL; +} + +static struct dma_fence *mock_chain(struct dma_fence *prev, + struct dma_fence *fence) +{ + struct dma_fence_chain *f; + + f = dma_fence_chain_alloc(); + if (!f) { + dma_fence_put(prev); + dma_fence_put(fence); + return NULL; + } + + dma_fence_chain_init(f, prev, fence, 1); + return &f->base; +} + +static int sanitycheck(void *arg) +{ + struct dma_fence *f, *chain, *array; + int err = 0; + + f = mock_fence(); + if (!f) + return -ENOMEM; + + array = mock_array(1, f); + if (!array) + return -ENOMEM; + + chain = mock_chain(NULL, array); + if (!chain) + return -ENOMEM; + + dma_fence_signal(f); + dma_fence_put(chain); + return err; +} + +static int unwrap_array(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *array; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + array = mock_array(2, f1, f2); + if (!array) + return -ENOMEM; + + dma_fence_unwrap_for_each(fence, &iter, array) { + if (fence == f1) { + f1 = NULL; + } else if (fence == f2) { + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_signal(f1); + dma_fence_signal(f2); + dma_fence_put(array); + return 0; +} + +static int unwrap_chain(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *chain; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + chain = mock_chain(f1, f2); + if (!chain) + return -ENOMEM; + + dma_fence_unwrap_for_each(fence, &iter, chain) { + if (fence == f1) { + f1 = NULL; + } else if (fence == f2) { + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_signal(f1); + dma_fence_signal(f2); + dma_fence_put(chain); + return 0; +} + +static int unwrap_chain_array(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *array, *chain; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + array = mock_array(2, f1, f2); + if (!array) + return -ENOMEM; + + chain = mock_chain(NULL, array); + if (!chain) + return -ENOMEM; + + dma_fence_unwrap_for_each(fence, &iter, chain) { + if (fence == f1) { + f1 = NULL; + } else if (fence == f2) { + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_signal(f1); + dma_fence_signal(f2); + dma_fence_put(chain); + return 0; +} + +int dma_fence_unwrap(void) +{ + static const struct subtest tests[] = { + SUBTEST(sanitycheck), + SUBTEST(unwrap_array), + SUBTEST(unwrap_chain), + 
SUBTEST(unwrap_chain_array), + }; + + return subtests(tests, NULL); +} diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index e34dcb0bb462..ec7f25def392 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -69,6 +69,8 @@ to_dma_fence_array(struct dma_fence *fence) * * Test if @array is a dma_fence_array object and if yes iterate over all fences * in the array. If not just iterate over the fence in @array itself. + * + * For a deep dive iterator see dma_fence_unwrap_for_each(). */ #define dma_fence_array_for_each(fence, index, head) \ for (index = 0, fence = dma_fence_array_first(head); fence; \ diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index 10d51bcdf7b7..4bdf0b96da28 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -112,6 +112,8 @@ static inline void dma_fence_chain_free(struct dma_fence_chain *chain) * * Iterate over all fences in the chain. We keep a reference to the current * fence while inside the loop which must be dropped when breaking out. + * + * For a deep dive iterator see dma_fence_unwrap_for_each(). */ #define dma_fence_chain_for_each(iter, head) \ for (iter = dma_fence_get(head); iter; \ diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h new file mode 100644 index 000000000000..77e335a1bcac --- /dev/null +++ b/include/linux/dma-fence-unwrap.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * fence-chain: chain fences together in a timeline + * + * Copyright (C) 2022 Advanced Micro Devices, Inc. + * Authors: + * Christian König + */ + +#ifndef __LINUX_DMA_FENCE_UNWRAP_H +#define __LINUX_DMA_FENCE_UNWRAP_H + +#include +#include + +/** + * struct dma_fence_unwrap - cursor into the container structure + * + * Should be used with dma_fence_unwrap_for_each() iterator macro. + */ +struct dma_fence_unwrap { + /** + * @chain: potential dma_fence_chain, but can be other fence as well + */ + struct dma_fence *chain; + /** + * @array: potential dma_fence_array, but can be other fence as well + */ + struct dma_fence *array; + /** + * @index: last returned index if @array is really a dma_fence_array + */ + unsigned int index; +}; + +/* Internal helper to start new array iteration, don't use directly */ +static inline struct dma_fence * +__dma_fence_unwrap_array(struct dma_fence_unwrap * cursor) +{ + cursor->array = dma_fence_chain_contained(cursor->chain); + cursor->index = 0; + return dma_fence_array_first(cursor->array); +} + +/** + * dma_fence_unwrap_first - return the first fence from fence containers + * @head: the entrypoint into the containers + * @cursor: current position inside the containers + * + * Unwraps potential dma_fence_chain/dma_fence_array containers and return the + * first fence. + */ +static inline struct dma_fence * +dma_fence_unwrap_first(struct dma_fence *head, struct dma_fence_unwrap *cursor) +{ + cursor->chain = dma_fence_get(head); + return __dma_fence_unwrap_array(cursor); +} + +/** + * dma_fence_unwrap_next - return the next fence from a fence containers + * @cursor: current position inside the containers + * + * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return + * the next fence from them. 
+ */ +static inline struct dma_fence * +dma_fence_unwrap_next(struct dma_fence_unwrap *cursor) +{ + struct dma_fence *tmp; + + ++cursor->index; + tmp = dma_fence_array_next(cursor->array, cursor->index); + if (tmp) + return tmp; + + cursor->chain = dma_fence_chain_walk(cursor->chain); + return __dma_fence_unwrap_array(cursor); +} + +/** + * dma_fence_unwrap_for_each - iterate over all fences in containers + * @fence: current fence + * @cursor: current position inside the containers + * @head: starting point for the iterator + * + * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all + * potential fences in them. If @head is just a normal fence only that one is + * returned. + */ +#define dma_fence_unwrap_for_each(fence, cursor, head) \ + for (fence = dma_fence_unwrap_first(head, cursor); fence; \ + fence = dma_fence_unwrap_next(cursor)) + +#endif -- cgit v1.2.3-71-gd317 From 421ab1be43bd015ffe744f4ea25df4f19d1ce6fe Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 25 Mar 2022 10:37:31 -0400 Subject: SUNRPC: Do not dereference non-socket transports in sysfs Do not cast the struct xprt to a sock_xprt unless we know it is a UDP or TCP transport. Otherwise the call to lock the mutex will scribble over whatever structure is actually there. This has been seen to cause hard system lockups when the underlying transport was RDMA. Fixes: b49ea673e119 ("SUNRPC: lock against ->sock changing during sysfs read") Cc: stable@vger.kernel.org Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 3 +++ include/linux/sunrpc/xprtsock.h | 1 - net/sunrpc/sysfs.c | 55 ++++++++++++++++++++--------------------- net/sunrpc/xprtsock.c | 26 +++++++++++++++++-- 4 files changed, 54 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 955ea4d7af0b..eef5e87c03b4 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -139,6 +139,9 @@ struct rpc_xprt_ops { void (*rpcbind)(struct rpc_task *task); void (*set_port)(struct rpc_xprt *xprt, unsigned short port); void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task); + int (*get_srcaddr)(struct rpc_xprt *xprt, char *buf, + size_t buflen); + unsigned short (*get_srcport)(struct rpc_xprt *xprt); int (*buf_alloc)(struct rpc_task *task); void (*buf_free)(struct rpc_task *task); void (*prepare_request)(struct rpc_rqst *req); diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 3eb0079669c5..38284f25eddf 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -10,7 +10,6 @@ int init_socket_xprt(void); void cleanup_socket_xprt(void); -unsigned short get_srcport(struct rpc_xprt *); #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c index 05c758da6a92..9d8a7d9f3e41 100644 --- a/net/sunrpc/sysfs.c +++ b/net/sunrpc/sysfs.c @@ -97,7 +97,7 @@ static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj, return 0; ret = sprintf(buf, "%s\n", xprt->address_strings[RPC_DISPLAY_ADDR]); xprt_put(xprt); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj, @@ -105,33 +105,31 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj, char *buf) { struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); - struct sockaddr_storage saddr; - struct sock_xprt *sock; - ssize_t ret = -1; + size_t buflen = PAGE_SIZE; + ssize_t ret = -ENOTSOCK; if (!xprt || 
!xprt_connected(xprt)) { - xprt_put(xprt); - return -ENOTCONN; + ret = -ENOTCONN; + } else if (xprt->ops->get_srcaddr) { + ret = xprt->ops->get_srcaddr(xprt, buf, buflen); + if (ret > 0) { + if (ret < buflen - 1) { + buf[ret] = '\n'; + ret++; + buf[ret] = '\0'; + } + } } - - sock = container_of(xprt, struct sock_xprt, xprt); - mutex_lock(&sock->recv_mutex); - if (sock->sock == NULL || - kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0) - goto out; - - ret = sprintf(buf, "%pISc\n", &saddr); -out: - mutex_unlock(&sock->recv_mutex); xprt_put(xprt); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) + struct kobj_attribute *attr, char *buf) { struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj); + unsigned short srcport = 0; + size_t buflen = PAGE_SIZE; ssize_t ret; if (!xprt || !xprt_connected(xprt)) { @@ -139,7 +137,11 @@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, return -ENOTCONN; } - ret = sprintf(buf, "last_used=%lu\ncur_cong=%lu\ncong_win=%lu\n" + if (xprt->ops->get_srcport) + srcport = xprt->ops->get_srcport(xprt); + + ret = snprintf(buf, buflen, + "last_used=%lu\ncur_cong=%lu\ncong_win=%lu\n" "max_num_slots=%u\nmin_num_slots=%u\nnum_reqs=%u\n" "binding_q_len=%u\nsending_q_len=%u\npending_q_len=%u\n" "backlog_q_len=%u\nmain_xprt=%d\nsrc_port=%u\n" @@ -147,14 +149,11 @@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj, xprt->last_used, xprt->cong, xprt->cwnd, xprt->max_reqs, xprt->min_reqs, xprt->num_reqs, xprt->binding.qlen, xprt->sending.qlen, xprt->pending.qlen, - xprt->backlog.qlen, xprt->main, - (xprt->xprt_class->ident == XPRT_TRANSPORT_TCP) ? - get_srcport(xprt) : 0, + xprt->backlog.qlen, xprt->main, srcport, atomic_long_read(&xprt->queuelen), - (xprt->xprt_class->ident == XPRT_TRANSPORT_TCP) ? 
- xprt->address_strings[RPC_DISPLAY_PORT] : "0"); + xprt->address_strings[RPC_DISPLAY_PORT]); xprt_put(xprt); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj, @@ -201,7 +200,7 @@ static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj, } xprt_put(xprt); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj, @@ -220,7 +219,7 @@ static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj, xprt_switch->xps_nunique_destaddr_xprts, atomic_long_read(&xprt_switch->xps_queuelen)); xprt_switch_put(xprt_switch); - return ret + 1; + return ret; } static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj, diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index b52eaa8a0cda..78af7518f263 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1638,7 +1638,7 @@ static int xs_get_srcport(struct sock_xprt *transport) return port; } -unsigned short get_srcport(struct rpc_xprt *xprt) +static unsigned short xs_sock_srcport(struct rpc_xprt *xprt) { struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt); unsigned short ret = 0; @@ -1648,7 +1648,25 @@ unsigned short get_srcport(struct rpc_xprt *xprt) mutex_unlock(&sock->recv_mutex); return ret; } -EXPORT_SYMBOL(get_srcport); + +static int xs_sock_srcaddr(struct rpc_xprt *xprt, char *buf, size_t buflen) +{ + struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt); + union { + struct sockaddr sa; + struct sockaddr_storage st; + } saddr; + int ret = -ENOTCONN; + + mutex_lock(&sock->recv_mutex); + if (sock->sock) { + ret = kernel_getsockname(sock->sock, &saddr.sa); + if (ret >= 0) + ret = snprintf(buf, buflen, "%pISc", &saddr.sa); + } + mutex_unlock(&sock->recv_mutex); + return ret; +} static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port) { @@ -2622,6 +2640,8 @@ static const struct rpc_xprt_ops xs_udp_ops = { .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, + .get_srcaddr = xs_sock_srcaddr, + .get_srcport = xs_sock_srcport, .buf_alloc = rpc_malloc, .buf_free = rpc_free, .send_request = xs_udp_send_request, @@ -2644,6 +2664,8 @@ static const struct rpc_xprt_ops xs_tcp_ops = { .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, + .get_srcaddr = xs_sock_srcaddr, + .get_srcport = xs_sock_srcport, .buf_alloc = rpc_malloc, .buf_free = rpc_free, .prepare_request = xs_stream_prepare_request, -- cgit v1.2.3-71-gd317 From bddac7c1e02ba47f0570e494c9289acea3062cc1 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 26 Mar 2022 10:42:04 -0700 Subject: Revert "swiotlb: rework "fix info leak with DMA_FROM_DEVICE"" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit aa6f8dcbab473f3a3c7454b74caa46d36cdc5d13. It turns out this breaks at least the ath9k wireless driver, and possibly others. What the ath9k driver does on packet receive is to set up the DMA transfer with: int ath_rx_init(..) .. bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, common->rx_bufsize, DMA_FROM_DEVICE); and then the receive logic (through ath_rx_tasklet()) will fetch incoming packets static bool ath_edma_get_buffers(..) .. 
dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, common->rx_bufsize, DMA_FROM_DEVICE); ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data); if (ret == -EINPROGRESS) { /*let device gain the buffer again*/ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, common->rx_bufsize, DMA_FROM_DEVICE); return false; } and it's worth noting how that first DMA sync: dma_sync_single_for_cpu(..DMA_FROM_DEVICE); is there to make sure the CPU can read the DMA buffer (possibly by copying it from the bounce buffer area, or by doing some cache flush). The iommu correctly turns that into a "copy from bounce bufer" so that the driver can look at the state of the packets. In the meantime, the device may continue to write to the DMA buffer, but we at least have a snapshot of the state due to that first DMA sync. But that _second_ DMA sync: dma_sync_single_for_device(..DMA_FROM_DEVICE); is telling the DMA mapping that the CPU wasn't interested in the area because the packet wasn't there. In the case of a DMA bounce buffer, that is a no-op. Note how it's not a sync for the CPU (the "for_device()" part), and it's not a sync for data written by the CPU (the "DMA_FROM_DEVICE" part). Or rather, it _should_ be a no-op. That's what commit aa6f8dcbab47 broke: it made the code bounce the buffer unconditionally, and changed the DMA_FROM_DEVICE to just unconditionally and illogically be DMA_TO_DEVICE. [ Side note: purely within the confines of the swiotlb driver it wasn't entirely illogical: The reason it did that odd DMA_FROM_DEVICE -> DMA_TO_DEVICE conversion thing is because inside the swiotlb driver, it uses just a swiotlb_bounce() helper that doesn't care about the whole distinction of who the sync is for - only which direction to bounce. So it took the "sync for device" to mean that the CPU must have been the one writing, and thought it meant DMA_TO_DEVICE. ] Also note how the commentary in that commit was wrong, probably due to that whole confusion, claiming that the commit makes the swiotlb code "bounce unconditionally (that is, also when dir == DMA_TO_DEVICE) in order do avoid synchronising back stale data from the swiotlb buffer" which is nonsensical for two reasons: - that "also when dir == DMA_TO_DEVICE" is nonsensical, as that was exactly when it always did - and should do - the bounce. - since this is a sync for the device (not for the CPU), we're clearly fundamentally not coping back stale data from the bounce buffers at all, because we'd be copying *to* the bounce buffers. So that commit was just very confused. It confused the direction of the synchronization (to the device, not the cpu) with the direction of the DMA (from the device). 
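Condensed, the receive-path contract described above looks like this (packet_ready() is a stand-in for the driver's descriptor check; the rest is the ordinary DMA API):

dma_sync_single_for_cpu(dev, buf_addr, len, DMA_FROM_DEVICE);	/* CPU may now read */
if (!packet_ready(buf)) {
	/*
	 * Nothing consumed: give the buffer back to the device. For a
	 * bounce buffer this must be a no-op - the CPU wrote nothing,
	 * so there is nothing to copy towards the device.
	 */
	dma_sync_single_for_device(dev, buf_addr, len, DMA_FROM_DEVICE);
}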
Reported-and-bisected-by: Oleksandr Natalenko Reported-by: Olha Cherevyk Cc: Halil Pasic Cc: Christoph Hellwig Cc: Kalle Valo Cc: Robin Murphy Cc: Toke Høiland-Jørgensen Cc: Maxime Bizon Cc: Johannes Berg Signed-off-by: Linus Torvalds --- Documentation/core-api/dma-attributes.rst | 8 ++++++++ include/linux/dma-mapping.h | 8 ++++++++ kernel/dma/swiotlb.c | 23 ++++++++--------------- 3 files changed, 24 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/Documentation/core-api/dma-attributes.rst b/Documentation/core-api/dma-attributes.rst index 1887d92e8e92..17706dc91ec9 100644 --- a/Documentation/core-api/dma-attributes.rst +++ b/Documentation/core-api/dma-attributes.rst @@ -130,3 +130,11 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged subsystem that the buffer is fully accessible at the elevated privilege level (and ideally inaccessible or at least read-only at the lesser-privileged levels). + +DMA_ATTR_OVERWRITE +------------------ + +This is a hint to the DMA-mapping subsystem that the device is expected to +overwrite the entire mapped size, thus the caller does not require any of the +previous buffer contents to be preserved. This allows bounce-buffering +implementations to optimise DMA_FROM_DEVICE transfers. diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index dca2b1355bb1..6150d11a607e 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -61,6 +61,14 @@ */ #define DMA_ATTR_PRIVILEGED (1UL << 9) +/* + * This is a hint to the DMA-mapping subsystem that the device is expected + * to overwrite the entire mapped size, thus the caller does not require any + * of the previous buffer contents to be preserved. This allows + * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers. + */ +#define DMA_ATTR_OVERWRITE (1UL << 10) + /* * A dma_addr_t can hold any valid DMA or bus address for the platform. It can * be given to a device to use as a DMA source or target. It is specific to a diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 6db1c475ec82..bfc56cb21705 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -627,14 +627,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, for (i = 0; i < nr_slots(alloc_size + offset); i++) mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); tlb_addr = slot_addr(mem->start, index) + offset; - /* - * When dir == DMA_FROM_DEVICE we could omit the copy from the orig - * to the tlb buffer, if we knew for sure the device will - * overwirte the entire current content. But we don't. Thus - * unconditional bounce may prevent leaking swiotlb content (i.e. - * kernel memory) to user-space. - */ - swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE || + dir == DMA_BIDIRECTIONAL)) + swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); return tlb_addr; } @@ -701,13 +697,10 @@ void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr, void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, size_t size, enum dma_data_direction dir) { - /* - * Unconditional bounce is necessary to avoid corruption on - * sync_*_for_cpu or dma_ummap_* when the device didn't overwrite - * the whole lengt of the bounce buffer. 
- */ - swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); - BUG_ON(!valid_dma_direction(dir)); + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) + swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); + else + BUG_ON(dir != DMA_FROM_DEVICE); } void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, -- cgit v1.2.3-71-gd317 From 901c7280ca0d5e2b4a8929fbe0bfb007ac2a6544 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 28 Mar 2022 11:37:05 -0700 Subject: Reinstate some of "swiotlb: rework "fix info leak with DMA_FROM_DEVICE"" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Halil Pasic points out [1] that rather than the full revert of that commit (done in bddac7c1e02b), a partial revert that only reverts the problematic case but still keeps some of the cleanups is probably better.  And that partial revert [2] had already been verified by Oleksandr Natalenko to also fix the issue; I had just missed that in the long discussion. So let's reinstate the cleanups from commit aa6f8dcbab47 ("swiotlb: rework "fix info leak with DMA_FROM_DEVICE""), and effectively only revert the part that caused problems. Link: https://lore.kernel.org/all/20220328013731.017ae3e3.pasic@linux.ibm.com/ [1] Link: https://lore.kernel.org/all/20220324055732.GB12078@lst.de/ [2] Link: https://lore.kernel.org/all/4386660.LvFx2qVVIh@natalenko.name/ [3] Suggested-by: Halil Pasic Tested-by: Oleksandr Natalenko Cc: Christoph Hellwig Signed-off-by: Linus Torvalds --- Documentation/core-api/dma-attributes.rst | 8 -------- include/linux/dma-mapping.h | 8 -------- kernel/dma/swiotlb.c | 12 ++++++++---- 3 files changed, 8 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/Documentation/core-api/dma-attributes.rst b/Documentation/core-api/dma-attributes.rst index 17706dc91ec9..1887d92e8e92 100644 --- a/Documentation/core-api/dma-attributes.rst +++ b/Documentation/core-api/dma-attributes.rst @@ -130,11 +130,3 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged subsystem that the buffer is fully accessible at the elevated privilege level (and ideally inaccessible or at least read-only at the lesser-privileged levels). - -DMA_ATTR_OVERWRITE ------------------- - -This is a hint to the DMA-mapping subsystem that the device is expected to -overwrite the entire mapped size, thus the caller does not require any of the -previous buffer contents to be preserved. This allows bounce-buffering -implementations to optimise DMA_FROM_DEVICE transfers. diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 6150d11a607e..dca2b1355bb1 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -61,14 +61,6 @@ */ #define DMA_ATTR_PRIVILEGED (1UL << 9) -/* - * This is a hint to the DMA-mapping subsystem that the device is expected - * to overwrite the entire mapped size, thus the caller does not require any - * of the previous buffer contents to be preserved. This allows - * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers. - */ -#define DMA_ATTR_OVERWRITE (1UL << 10) - /* * A dma_addr_t can hold any valid DMA or bus address for the platform. It can * be given to a device to use as a DMA source or target.
It is specific to a diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index bfc56cb21705..6c350555e5a1 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -627,10 +627,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, for (i = 0; i < nr_slots(alloc_size + offset); i++) mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); tlb_addr = slot_addr(mem->start, index) + offset; - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE || - dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); + /* + * When dir == DMA_FROM_DEVICE we could omit the copy from the orig + * to the tlb buffer, if we knew for sure the device will + * overwirte the entire current content. But we don't. Thus + * unconditional bounce may prevent leaking swiotlb content (i.e. + * kernel memory) to user-space. + */ + swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); return tlb_addr; } -- cgit v1.2.3-71-gd317 From 504c1cabe325df65c18ef38365ddd1a41c6b591b Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 25 Jan 2022 21:22:21 +0800 Subject: mm/balloon_compaction: make balloon page compaction callbacks static Since commit b1123ea6d3b3 ("mm: balloon: use general non-lru movable page feature"), these functions are called via balloon_aops callbacks. They're not called directly outside this file. So make them static and clean up the relevant code. Signed-off-by: Miaohe Lin Link: https://lore.kernel.org/r/20220125132221.2220-1-linmiaohe@huawei.com Signed-off-by: Michael S. Tsirkin Reviewed-by: Muchun Song --- include/linux/balloon_compaction.h | 22 ---------------------- mm/balloon_compaction.c | 6 +++--- 2 files changed, 3 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 338aa27e4773..edb7f6d41faa 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -80,12 +80,6 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) #ifdef CONFIG_BALLOON_COMPACTION extern const struct address_space_operations balloon_aops; -extern bool balloon_page_isolate(struct page *page, - isolate_mode_t mode); -extern void balloon_page_putback(struct page *page); -extern int balloon_page_migrate(struct address_space *mapping, - struct page *newpage, - struct page *page, enum migrate_mode mode); /* * balloon_page_insert - insert a page into the balloon's page list and make @@ -155,22 +149,6 @@ static inline void balloon_page_delete(struct page *page) list_del(&page->lru); } -static inline bool balloon_page_isolate(struct page *page) -{ - return false; -} - -static inline void balloon_page_putback(struct page *page) -{ - return; -} - -static inline int balloon_page_migrate(struct page *newpage, - struct page *page, enum migrate_mode mode) -{ - return 0; -} - static inline gfp_t balloon_mapping_gfp_mask(void) { return GFP_HIGHUSER; diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 907fefde2572..4b8eab4b3f45 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue); #ifdef CONFIG_BALLOON_COMPACTION -bool balloon_page_isolate(struct page *page, isolate_mode_t mode) +static bool balloon_page_isolate(struct page *page, isolate_mode_t mode) { struct balloon_dev_info *b_dev_info = balloon_page_device(page); @@ -217,7 +217,7 @@ bool balloon_page_isolate(struct page *page, 
isolate_mode_t mode) return true; } -void balloon_page_putback(struct page *page) +static void balloon_page_putback(struct page *page) { struct balloon_dev_info *b_dev_info = balloon_page_device(page); unsigned long flags; @@ -230,7 +230,7 @@ void balloon_page_putback(struct page *page) /* move_to_new_page() counterpart for a ballooned page */ -int balloon_page_migrate(struct address_space *mapping, +static int balloon_page_migrate(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) { -- cgit v1.2.3-71-gd317 From a61280ddddaa45f95b60dd54c05f8e0e5b6810b7 Mon Sep 17 00:00:00 2001 From: Longpeng Date: Tue, 15 Mar 2022 11:25:51 +0800 Subject: vdpa: support exposing the config size to userspace - GET_CONFIG_SIZE: return the size of the virtio config space. The size contains the fields which are conditional on feature bits. Acked-by: Jason Wang Signed-off-by: Longpeng Link: https://lore.kernel.org/r/20220315032553.455-2-longpeng2@huawei.com Signed-off-by: Michael S. Tsirkin Reviewed-by: Stefano Garzarella --- drivers/vhost/vdpa.c | 17 +++++++++++++++++ include/linux/vdpa.h | 3 ++- include/uapi/linux/vhost.h | 4 ++++ 3 files changed, 23 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 05f5fd2af58f..04375722c0e5 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -358,6 +358,20 @@ static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp) return 0; } +static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + u32 size; + + size = ops->get_config_size(vdpa); + + if (copy_to_user(argp, &size, sizeof(size))) + return -EFAULT; + + return 0; +} + static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, void __user *argp) { @@ -495,6 +509,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep, case VHOST_VDPA_GET_IOVA_RANGE: r = vhost_vdpa_get_iova_range(v, argp); break; + case VHOST_VDPA_GET_CONFIG_SIZE: + r = vhost_vdpa_get_config_size(v, argp); + break; default: r = vhost_dev_ioctl(&v->vdev, cmd, argp); if (r == -ENOIOCTLCMD) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 721089bb4c84..a5269191edda 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -207,7 +207,8 @@ struct vdpa_map_file { * @reset: Reset device * @vdev: vdpa device * Returns integer: success (0) or error (< 0) - * @get_config_size: Get the size of the configuration space + * @get_config_size: Get the size of the configuration space includes + * fields that are conditional on feature bits. 
* @vdev: vdpa device * Returns size_t: configuration size * @get_config: Read from device specific configuration space diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index c998860d7bbc..bc74e95a273a 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -150,4 +150,8 @@ /* Get the valid iova range */ #define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \ struct vhost_vdpa_iova_range) + +/* Get the config size */ +#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32) + #endif -- cgit v1.2.3-71-gd317 From 81d46d693173a5c86a9b0c648eca1817ad5c0ae5 Mon Sep 17 00:00:00 2001 From: Longpeng Date: Tue, 15 Mar 2022 11:25:52 +0800 Subject: vdpa: change the type of nvqs to u32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change vdpa_device.nvqs and vhost_vdpa.nvqs to use u32 Signed-off-by: Longpeng Link: https://lore.kernel.org/r/20220315032553.455-3-longpeng2@huawei.com Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang
Reviewed-by: Stefano Garzarella --- drivers/vdpa/vdpa.c | 6 +++--- drivers/vhost/vdpa.c | 10 ++++++---- include/linux/vdpa.h | 6 +++--- 3 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 1ea525433a5c..2b75c00b1005 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -232,7 +232,7 @@ static int vdpa_name_match(struct device *dev, const void *data) return (strcmp(dev_name(&vdev->dev), data) == 0); } -static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs) +static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs) { struct device *dev; @@ -257,7 +257,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs) * * Return: Returns an error when fail to add device to vDPA bus */ -int _vdpa_register_device(struct vdpa_device *vdev, int nvqs) +int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs) { if (!vdev->mdev) return -EINVAL; @@ -274,7 +274,7 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device); * * Return: Returns an error when fail to add to vDPA bus */ -int vdpa_register_device(struct vdpa_device *vdev, int nvqs) +int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs) { int err; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 04375722c0e5..3b7fc2fe45f3 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -42,7 +42,7 @@ struct vhost_vdpa { struct device dev; struct cdev cdev; atomic_t opened; - int nvqs; + u32 nvqs; int virtio_id; int minor; struct eventfd_ctx *config_ctx; @@ -161,7 +161,8 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp) struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; u8 status, status_old; - int ret, nvqs = v->nvqs; + u32 nvqs = v->nvqs; + int ret; u16 i; if (copy_from_user(&status, statusp, sizeof(status))) @@ -968,7 +969,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep) struct vhost_vdpa *v; struct vhost_dev *dev; struct vhost_virtqueue **vqs; - int nvqs, i, r, opened; + int r, opened; + u32 i, nvqs; v = container_of(inode->i_cdev, struct vhost_vdpa, cdev); @@ -1021,7 +1023,7 @@ err: static void vhost_vdpa_clean_irq(struct vhost_vdpa *v) { - int i; + u32 i; for (i = 0; i < v->nvqs; i++) vhost_vdpa_unsetup_vq_irq(v, i); diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index a5269191edda..8943a209202e 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -83,7 +83,7 @@ struct vdpa_device { unsigned int index; bool features_valid; bool use_va; - int nvqs; + u32 nvqs; struct vdpa_mgmt_dev *mdev; }; @@ -338,10 +338,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, dev_struct, member)), name, use_va), \ dev_struct, member) -int vdpa_register_device(struct vdpa_device *vdev, int nvqs); +int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs); void vdpa_unregister_device(struct vdpa_device *vdev); -int _vdpa_register_device(struct vdpa_device *vdev, int nvqs); +int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs); void _vdpa_unregister_device(struct vdpa_device *vdev); /** -- cgit v1.2.3-71-gd317 From f32404ae1bb9a7428a3c77419672a28895d185bf Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 25 Mar 2022 22:50:23 +0100 Subject: net: move net_unlink_todo() out of the header There's no reason for this to be in netdevice.h, it's all just used in dev.c. Also make it no longer inline and let the compiler decide to do that by itself. 
Signed-off-by: Johannes Berg Link: https://lore.kernel.org/r/20220325225023.f49b9056fe1c.I6b901a2df00000837a9bd251a8dd259bd23f5ded@changeid Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 10 ---------- net/core/dev.c | 10 ++++++++++ 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cd7a597c55b1..59e27a2b7bf0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4601,16 +4601,6 @@ bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter); -#ifdef CONFIG_LOCKDEP -static LIST_HEAD(net_unlink_list); - -static inline void net_unlink_todo(struct net_device *dev) -{ - if (list_empty(&dev->unlink_list)) - list_add_tail(&dev->unlink_list, &net_unlink_list); -} -#endif - /* iterate through upper list, must be called under RCU read lock */ #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ for (iter = &(dev)->adj_list.upper, \ diff --git a/net/core/dev.c b/net/core/dev.c index 8a5109479dbe..8c6c08446556 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7193,6 +7193,16 @@ static int __netdev_update_upper_level(struct net_device *dev, return 0; } +#ifdef CONFIG_LOCKDEP +static LIST_HEAD(net_unlink_list); + +static void net_unlink_todo(struct net_device *dev) +{ + if (list_empty(&dev->unlink_list)) + list_add_tail(&dev->unlink_list, &net_unlink_list); +} +#endif + static int __netdev_update_lower_level(struct net_device *dev, struct netdev_nested_priv *priv) { -- cgit v1.2.3-71-gd317 From 73f9b911faa74ac5107879de05c9489c419f41bb Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 26 Mar 2022 11:27:05 +0900 Subject: kprobes: Use rethook for kretprobe if possible Use rethook for kretprobe function return hooking if the arch sets CONFIG_HAVE_RETHOOK=y. In this case, CONFIG_KRETPROBE_ON_RETHOOK is set to 'y' automatically, and the kretprobe internal data fields switch to using rethook. If not, it continues to use kretprobe-specific function return hooks.
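The user-facing API is unchanged by this rework, so existing kretprobe users behave the same with either backend. For context, a minimal sketch of a kretprobe user (the probed symbol "kernel_clone" and the handler body are illustrative, not taken from this patch):

#include <linux/kprobes.h>

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("probed function returned %ld\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "kernel_clone",	/* illustrative target */
	.handler	= my_ret_handler,	/* runs on function return */
	.maxactive	= 20,			/* sizes the instance pool */
};

/* With CONFIG_KRETPROBE_ON_RETHOOK=y, register_kretprobe(&my_kretprobe)
 * allocates rp->rh via rethook_alloc() plus maxactive rethook nodes (see
 * the register_kretprobe() hunk below); otherwise it fills the legacy
 * freelist. unregister_kretprobe() releases either backend. */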
Suggested-by: Peter Zijlstra Signed-off-by: Masami Hiramatsu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164826162556.2455864.12255833167233452047.stgit@devnote2 --- arch/Kconfig | 8 ++- include/linux/kprobes.h | 51 +++++++++++++++++- kernel/Makefile | 1 + kernel/kprobes.c | 124 +++++++++++++++++++++++++++++++++++++------- kernel/trace/trace_kprobe.c | 4 +- 5 files changed, 163 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 84bc1de02720..33e06966f248 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -164,7 +164,13 @@ config ARCH_USE_BUILTIN_BSWAP config KRETPROBES def_bool y - depends on KPROBES && HAVE_KRETPROBES + depends on KPROBES && (HAVE_KRETPROBES || HAVE_RETHOOK) + +config KRETPROBE_ON_RETHOOK + def_bool y + depends on HAVE_RETHOOK + depends on KRETPROBES + select RETHOOK config USER_RETURN_NOTIFIER bool diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 312ff997c743..157168769fc2 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #ifdef CONFIG_KPROBES @@ -149,13 +150,20 @@ struct kretprobe { int maxactive; int nmissed; size_t data_size; +#ifdef CONFIG_KRETPROBE_ON_RETHOOK + struct rethook *rh; +#else struct freelist_head freelist; struct kretprobe_holder *rph; +#endif }; #define KRETPROBE_MAX_DATA_SIZE 4096 struct kretprobe_instance { +#ifdef CONFIG_KRETPROBE_ON_RETHOOK + struct rethook_node node; +#else union { struct freelist_node freelist; struct rcu_head rcu; @@ -164,6 +172,7 @@ struct kretprobe_instance { struct kretprobe_holder *rph; kprobe_opcode_t *ret_addr; void *fp; +#endif char data[]; }; @@ -186,10 +195,24 @@ extern void kprobe_busy_begin(void); extern void kprobe_busy_end(void); #ifdef CONFIG_KRETPROBES -extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, - struct pt_regs *regs); +/* Check whether @p is used for implementing a trampoline. 
*/ extern int arch_trampoline_kprobe(struct kprobe *p); +#ifdef CONFIG_KRETPROBE_ON_RETHOOK +static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), + "Kretprobe is accessed from instance under preemptive context"); + + return (struct kretprobe *)READ_ONCE(ri->node.rethook->data); +} +static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri) +{ + return ri->node.ret_addr; +} +#else +extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs); void arch_kretprobe_fixup_return(struct pt_regs *regs, kprobe_opcode_t *correct_ret_addr); @@ -232,6 +255,12 @@ static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance return READ_ONCE(ri->rph->rp); } +static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri) +{ + return (unsigned long)ri->ret_addr; +} +#endif /* CONFIG_KRETPROBE_ON_RETHOOK */ + #else /* !CONFIG_KRETPROBES */ static inline void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) @@ -395,7 +424,11 @@ void unregister_kretprobe(struct kretprobe *rp); int register_kretprobes(struct kretprobe **rps, int num); void unregister_kretprobes(struct kretprobe **rps, int num); +#ifdef CONFIG_KRETPROBE_ON_RETHOOK +#define kprobe_flush_task(tk) do {} while (0) +#else void kprobe_flush_task(struct task_struct *tk); +#endif void kprobe_free_init_mem(void); @@ -509,6 +542,19 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) #endif /* !CONFIG_OPTPROBES */ #ifdef CONFIG_KRETPROBES +#ifdef CONFIG_KRETPROBE_ON_RETHOOK +static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr) +{ + return is_rethook_trampoline(addr); +} + +static nokprobe_inline +unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp, + struct llist_node **cur) +{ + return rethook_find_ret_addr(tsk, (unsigned long)fp, cur); +} +#else static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr) { return (void *)addr == kretprobe_trampoline_addr(); @@ -516,6 +562,7 @@ static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr) unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp, struct llist_node **cur); +#endif #else static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr) { diff --git a/kernel/Makefile b/kernel/Makefile index 56f4ee97f328..471d71935e90 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -108,6 +108,7 @@ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_TRACE_CLOCK) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_TRACEPOINTS) += trace/ +obj-$(CONFIG_RETHOOK) += trace/ obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-$(CONFIG_CPU_PM) += cpu_pm.o obj-$(CONFIG_BPF) += bpf/ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 185badc780b7..dbe57df2e199 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1237,6 +1237,27 @@ void kprobes_inc_nmissed_count(struct kprobe *p) } NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); +static struct kprobe kprobe_busy = { + .addr = (void *) get_kprobe, +}; + +void kprobe_busy_begin(void) +{ + struct kprobe_ctlblk *kcb; + + preempt_disable(); + __this_cpu_write(current_kprobe, &kprobe_busy); + kcb = get_kprobe_ctlblk(); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; +} + +void kprobe_busy_end(void) +{ + __this_cpu_write(current_kprobe, NULL); + preempt_enable(); +} + +#if !defined(CONFIG_KRETPROBE_ON_RETHOOK) static void free_rp_inst_rcu(struct rcu_head *head) { struct 
kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu); @@ -1258,26 +1279,6 @@ static void recycle_rp_inst(struct kretprobe_instance *ri) } NOKPROBE_SYMBOL(recycle_rp_inst); -static struct kprobe kprobe_busy = { - .addr = (void *) get_kprobe, -}; - -void kprobe_busy_begin(void) -{ - struct kprobe_ctlblk *kcb; - - preempt_disable(); - __this_cpu_write(current_kprobe, &kprobe_busy); - kcb = get_kprobe_ctlblk(); - kcb->kprobe_status = KPROBE_HIT_ACTIVE; -} - -void kprobe_busy_end(void) -{ - __this_cpu_write(current_kprobe, NULL); - preempt_enable(); -} - /* * This function is called from delayed_put_task_struct() when a task is * dead and cleaned up to recycle any kretprobe instances associated with @@ -1327,6 +1328,7 @@ static inline void free_rp_inst(struct kretprobe *rp) rp->rph = NULL; } } +#endif /* !CONFIG_KRETPROBE_ON_RETHOOK */ /* Add the new probe to 'ap->list'. */ static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) @@ -1925,6 +1927,7 @@ static struct notifier_block kprobe_exceptions_nb = { #ifdef CONFIG_KRETPROBES +#if !defined(CONFIG_KRETPROBE_ON_RETHOOK) /* This assumes the 'tsk' is the current task or the is not running. */ static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk, struct llist_node **cur) @@ -2087,6 +2090,57 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) return 0; } NOKPROBE_SYMBOL(pre_handler_kretprobe); +#else /* CONFIG_KRETPROBE_ON_RETHOOK */ +/* + * This kprobe pre_handler is registered with every kretprobe. When probe + * hits it will set up the return probe. + */ +static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) +{ + struct kretprobe *rp = container_of(p, struct kretprobe, kp); + struct kretprobe_instance *ri; + struct rethook_node *rhn; + + rhn = rethook_try_get(rp->rh); + if (!rhn) { + rp->nmissed++; + return 0; + } + + ri = container_of(rhn, struct kretprobe_instance, node); + + if (rp->entry_handler && rp->entry_handler(ri, regs)) + rethook_recycle(rhn); + else + rethook_hook(rhn, regs, kprobe_ftrace(p)); + + return 0; +} +NOKPROBE_SYMBOL(pre_handler_kretprobe); + +static void kretprobe_rethook_handler(struct rethook_node *rh, void *data, + struct pt_regs *regs) +{ + struct kretprobe *rp = (struct kretprobe *)data; + struct kretprobe_instance *ri; + struct kprobe_ctlblk *kcb; + + /* The data must NOT be null. This means rethook data structure is broken. 
*/ + if (WARN_ON_ONCE(!data)) + return; + + __this_cpu_write(current_kprobe, &rp->kp); + kcb = get_kprobe_ctlblk(); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + ri = container_of(rh, struct kretprobe_instance, node); + rp->handler(ri, regs); + + __this_cpu_write(current_kprobe, NULL); +} +NOKPROBE_SYMBOL(kretprobe_rethook_handler); + +#endif /* !CONFIG_KRETPROBE_ON_RETHOOK */ /** * kprobe_on_func_entry() -- check whether given address is function entry @@ -2155,6 +2209,29 @@ int register_kretprobe(struct kretprobe *rp) rp->maxactive = num_possible_cpus(); #endif } +#ifdef CONFIG_KRETPROBE_ON_RETHOOK + rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler); + if (!rp->rh) + return -ENOMEM; + + for (i = 0; i < rp->maxactive; i++) { + inst = kzalloc(sizeof(struct kretprobe_instance) + + rp->data_size, GFP_KERNEL); + if (inst == NULL) { + rethook_free(rp->rh); + rp->rh = NULL; + return -ENOMEM; + } + rethook_add_node(rp->rh, &inst->node); + } + rp->nmissed = 0; + /* Establish function entry probe point */ + ret = register_kprobe(&rp->kp); + if (ret != 0) { + rethook_free(rp->rh); + rp->rh = NULL; + } +#else /* !CONFIG_KRETPROBE_ON_RETHOOK */ rp->freelist.head = NULL; rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL); if (!rp->rph) @@ -2179,6 +2256,7 @@ int register_kretprobe(struct kretprobe *rp) ret = register_kprobe(&rp->kp); if (ret != 0) free_rp_inst(rp); +#endif return ret; } EXPORT_SYMBOL_GPL(register_kretprobe); @@ -2217,7 +2295,11 @@ void unregister_kretprobes(struct kretprobe **rps, int num) for (i = 0; i < num; i++) { if (__unregister_kprobe_top(&rps[i]->kp) < 0) rps[i]->kp.addr = NULL; +#ifdef CONFIG_KRETPROBE_ON_RETHOOK + rethook_free(rps[i]->rh); +#else rps[i]->rph->rp = NULL; +#endif } mutex_unlock(&kprobe_mutex); @@ -2225,7 +2307,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num) for (i = 0; i < num; i++) { if (rps[i]->kp.addr) { __unregister_kprobe_bottom(&rps[i]->kp); +#ifndef CONFIG_KRETPROBE_ON_RETHOOK + free_rp_inst(rps[i]); +#endif } } } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index b62fd785b599..47cebef78532 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1433,7 +1433,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, fbuffer.regs = regs; entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event); entry->func = (unsigned long)tk->rp.kp.addr; - entry->ret_ip = (unsigned long)ri->ret_addr; + entry->ret_ip = get_kretprobe_retaddr(ri); store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); trace_event_buffer_commit(&fbuffer); @@ -1628,7 +1628,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, return; entry->func = (unsigned long)tk->rp.kp.addr; - entry->ret_ip = (unsigned long)ri->ret_addr; + entry->ret_ip = get_kretprobe_retaddr(ri); store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, head, NULL); -- cgit v1.2.3-71-gd317 From 5974ea7ce0f9a5987fc8cf5e08ad6e3e70bb542e Mon Sep 17 00:00:00 2001 From: Sungup Moon Date: Mon, 14 Mar 2022 20:05:45 +0900 Subject: nvme: allow duplicate NSIDs for private namespaces An NVMe subsystem with multiple controllers can have private namespaces that use the same NSID under some conditions: "If Namespace Management, ANA Reporting, or NVM Sets are supported, the NSIDs shall be unique within the NVM subsystem.
If the Namespace Management, ANA Reporting, and NVM Sets are not supported, then NSIDs: a) for shared namespace shall be unique; and b) for private namespace are not required to be unique." Reference: Section 6.1.6 NSID and Namespace Usage; NVM Express 1.4c spec. Make sure this specific setup is supported in Linux. Fixes: 9ad1927a3bc2 ("nvme: always search for namespace head") Signed-off-by: Sungup Moon [hch: refactored and fixed the controller vs subsystem based naming conflict] Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg --- drivers/nvme/host/core.c | 15 ++++++++++----- drivers/nvme/host/multipath.c | 7 ++++--- drivers/nvme/host/nvme.h | 19 +++++++++++++++++++ include/linux/nvme.h | 1 + 4 files changed, 34 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ccc5877d514b..6dc05c1fa7db 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3627,15 +3627,20 @@ static const struct attribute_group *nvme_dev_attr_groups[] = { NULL, }; -static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, +static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns_head *h; - lockdep_assert_held(&subsys->lock); + lockdep_assert_held(&ctrl->subsys->lock); - list_for_each_entry(h, &subsys->nsheads, entry) { - if (h->ns_id != nsid) + list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { + /* + * Private namespaces can share NSIDs under some conditions. + * In that case we can't use the same ns_head for namespaces + * with the same NSID. + */ + if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) continue; if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) return h; @@ -3829,7 +3834,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, } mutex_lock(&ctrl->subsys->lock); - head = nvme_find_ns_head(ctrl->subsys, nsid); + head = nvme_find_ns_head(ctrl, nsid); if (!head) { ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids); if (ret) { diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index c97d7f843977..ba90555124c4 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -482,10 +482,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) /* * Add a multipath node if the subsystems supports multiple controllers. - * We also do this for private namespaces as the namespace sharing data could - * change after a rescan. + * We also do this for private namespaces as the namespace sharing flag + * could change after a rescan. */ - if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath) + if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || + !nvme_is_unique_nsid(ctrl, head) || !multipath) return 0; head->disk = blk_alloc_disk(ctrl->numa_node); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 1ea908d43e17..1552a48719d6 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -722,6 +722,25 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, return queue_live; return __nvme_check_ready(ctrl, rq, queue_live); } + +/* + * NSID shall be unique for all shared namespaces, or if at least one of the + * following conditions is met: + * 1. Namespace Management is supported by the controller + * 2. ANA is supported by the controller + * 3. NVM Set are supported by the controller + * + * In other case, private namespace are not required to report a unique NSID. 
+ */ +static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl, + struct nvme_ns_head *head) +{ + return head->shared || + (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) || + (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) || + (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS); +} + int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buf, unsigned bufflen); int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 9dbc3ef4daf7..2dcee34d467d 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -345,6 +345,7 @@ enum { NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, + NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3, NVME_CTRL_OACS_DIRECTIVES = 1 << 5, NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8, NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1, -- cgit v1.2.3-71-gd317 From 3ae8fd41573af4fb3a490c9ed947fc936ba87190 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Tue, 11 Jan 2022 16:57:50 -0600 Subject: rtc: mc146818-lib: Fix the AltCentury for AMD platforms Setting the century forward has been failing on AMD platforms. There was a previous attempt at fixing this for family 0x17 as part of commit 7ad295d5196a ("rtc: Fix the AltCentury value on AMD/Hygon platform") but this was later reverted due to some problems reported that appeared to stem from an FW bug on a family 0x17 desktop system. The same comments mentioned in the previous commit continue to apply to the newer platforms as well. ``` MC146818 driver use function mc146818_set_time() to set register RTC_FREQ_SELECT(RTC_REG_A)'s bit4-bit6 field which means divider stage reset value on Intel platform to 0x7. While AMD/Hygon RTC_REG_A(0Ah)'s bit4 is defined as DV0 [Reference]: DV0 = 0 selects Bank 0, DV0 = 1 selects Bank 1. Bit5-bit6 is defined as reserved. DV0 is set to 1, it will select Bank 1, which will disable AltCentury register(0x32) access. As UEFI pass acpi_gbl_FADT.century 0x32 (AltCentury), the CMOS write will be failed on code: CMOS_WRITE(century, acpi_gbl_FADT.century). Correct RTC_REG_A bank select bit(DV0) to 0 on AMD/Hygon CPUs, it will enable AltCentury(0x32) register writing and finally setup century as expected. ``` However in closer examination the change previously submitted was also modifying bits 5 & 6 which are declared reserved in the AMD documentation. So instead modify just the DV0 bank selection bit. Being cognizant that there was a failure reported before, split the code change out to a static function that can also be used for exclusions if any regressions such as Mikhail's pop up again. 
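Restated as straight-line code, the resulting register-A handling in mc146818_set_time() is (constants as defined by this patch: RTC_DIV_RESET2 is 0x70, RTC_AMD_BANK_SELECT is 0x10):

save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
if (apply_amd_register_a_behavior())
	/* AMD/Hygon: bit 4 is DV0 (bank select), bits 5-6 are reserved.
	 * Clearing only DV0 keeps Bank 0 selected, so the AltCentury
	 * register (0x32) remains writable. */
	CMOS_WRITE(save_freq_select & ~RTC_AMD_BANK_SELECT, RTC_FREQ_SELECT);
else
	/* Intel-style parts: set the divider-reset field in bits 4-6 */
	CMOS_WRITE(save_freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);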
Cc: Jinke Fan Cc: Mikhail Gavrilov Link: https://lore.kernel.org/all/CABXGCsMLob0DC25JS8wwAYydnDoHBSoMh2_YLPfqm3TTvDE-Zw@mail.gmail.com/ Link: https://www.amd.com/system/files/TechDocs/51192_Bolton_FCH_RRG.pdf Signed-off-by: Raul E Rangel Signed-off-by: Mario Limonciello Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20220111225750.1699-1-mario.limonciello@amd.com --- drivers/rtc/rtc-mc146818-lib.c | 16 +++++++++++++++- include/linux/mc146818rtc.h | 2 ++ 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index 562f99b664a2..522449b25921 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -176,6 +176,17 @@ int mc146818_get_time(struct rtc_time *time) } EXPORT_SYMBOL_GPL(mc146818_get_time); +/* AMD systems don't allow access to AltCentury with DV1 */ +static bool apply_amd_register_a_behavior(void) +{ +#ifdef CONFIG_X86 + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return true; +#endif + return false; +} + /* Set the current date and time in the real time clock. */ int mc146818_set_time(struct rtc_time *time) { @@ -249,7 +260,10 @@ int mc146818_set_time(struct rtc_time *time) save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + if (apply_amd_register_a_behavior()) + CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); + else + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index 808bb4cee230..b0da04fe087b 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -86,6 +86,8 @@ struct cmos_rtc_board_info { /* 2 values for divider stage reset, others for "testing purposes only" */ # define RTC_DIV_RESET1 0x60 # define RTC_DIV_RESET2 0x70 + /* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */ +# define RTC_AMD_BANK_SELECT 0x10 /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */ # define RTC_RATE_SELECT 0x0F -- cgit v1.2.3-71-gd317 From eb07d5a4da041fd2e30e386e5fd12d23bb31cf9e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 30 Mar 2022 11:48:37 +1100 Subject: SUNRPC: handle malloc failure in ->request_prepare If ->request_prepare() detects an error, it sets ->rq_task->tk_status. This is easy for callers to ignore. The only caller is xprt_request_enqueue_receive() and it does ignore the error, as does call_encode() which calls it. This can result in a request being queued to receive a reply without an allocated receive buffer. So instead of setting rq_task->tk_status, return an error, and store in ->tk_status only in call_encode(); The call to xprt_request_enqueue_receive() is now earlier in call_encode(), where the error can still be handled. 
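Condensed to its essentials, the call_encode() flow after this patch looks like the sketch below (not a verbatim quote of the function):

rpc_xdr_encode(task);
/* enqueue for receive while encode errors are still handled, so a failed
 * buffer allocation in ->prepare_request() surfaces here ... */
if (task->tk_status == 0 && rpc_reply_expected(task))
	task->tk_status = xprt_request_enqueue_receive(task);
if (task->tk_status != 0) {
	/* ... and is routed through the usual fatal/nonfatal error
	 * handling instead of being silently ignored */
	return;
}
xprt_request_enqueue_transmit(task);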
Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 5 ++--- net/sunrpc/clnt.c | 6 +++--- net/sunrpc/xprt.c | 23 +++++++++++++++-------- net/sunrpc/xprtsock.c | 4 ++-- 4 files changed, 22 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index eef5e87c03b4..f171f8c09e13 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -144,7 +144,7 @@ struct rpc_xprt_ops { unsigned short (*get_srcport)(struct rpc_xprt *xprt); int (*buf_alloc)(struct rpc_task *task); void (*buf_free)(struct rpc_task *task); - void (*prepare_request)(struct rpc_rqst *req); + int (*prepare_request)(struct rpc_rqst *req); int (*send_request)(struct rpc_rqst *req); void (*wait_for_reply_request)(struct rpc_task *task); void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task); @@ -357,10 +357,9 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req); -void xprt_request_prepare(struct rpc_rqst *req); bool xprt_prepare_transmit(struct rpc_task *task); void xprt_request_enqueue_transmit(struct rpc_task *task); -void xprt_request_enqueue_receive(struct rpc_task *task); +int xprt_request_enqueue_receive(struct rpc_task *task); void xprt_request_wait_receive(struct rpc_task *task); void xprt_request_dequeue_xprt(struct rpc_task *task); bool xprt_request_need_retransmit(struct rpc_task *task); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 8bf2af8546d2..3c7407104d54 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1858,6 +1858,9 @@ call_encode(struct rpc_task *task) xprt_request_dequeue_xprt(task); /* Encode here so that rpcsec_gss can use correct sequence number. */ rpc_xdr_encode(task); + /* Add task to reply queue before transmission to avoid races */ + if (task->tk_status == 0 && rpc_reply_expected(task)) + task->tk_status = xprt_request_enqueue_receive(task); /* Did the encode result in an error condition? */ if (task->tk_status != 0) { /* Was the error nonfatal? 
*/ @@ -1881,9 +1884,6 @@ call_encode(struct rpc_task *task) return; } - /* Add task to reply queue before transmission to avoid races */ - if (rpc_reply_expected(task)) - xprt_request_enqueue_receive(task); xprt_request_enqueue_transmit(task); out: task->tk_action = call_transmit; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 880bfe8dc7f6..73344ffb2692 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -69,10 +69,11 @@ /* * Local functions */ -static void xprt_init(struct rpc_xprt *xprt, struct net *net); +static void xprt_init(struct rpc_xprt *xprt, struct net *net); static __be32 xprt_alloc_xid(struct rpc_xprt *xprt); -static void xprt_destroy(struct rpc_xprt *xprt); -static void xprt_request_init(struct rpc_task *task); +static void xprt_destroy(struct rpc_xprt *xprt); +static void xprt_request_init(struct rpc_task *task); +static int xprt_request_prepare(struct rpc_rqst *req); static DEFINE_SPINLOCK(xprt_list_lock); static LIST_HEAD(xprt_list); @@ -1143,16 +1144,19 @@ xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req) * @task: RPC task * */ -void +int xprt_request_enqueue_receive(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; + int ret; if (!xprt_request_need_enqueue_receive(task, req)) - return; + return 0; - xprt_request_prepare(task->tk_rqstp); + ret = xprt_request_prepare(task->tk_rqstp); + if (ret) + return ret; spin_lock(&xprt->queue_lock); /* Update the softirq receive buffer */ @@ -1166,6 +1170,7 @@ xprt_request_enqueue_receive(struct rpc_task *task) /* Turn off autodisconnect */ del_singleshot_timer_sync(&xprt->timer); + return 0; } /** @@ -1452,14 +1457,16 @@ xprt_request_dequeue_xprt(struct rpc_task *task) * * Calls into the transport layer to do whatever is needed to prepare * the request for transmission or receive. + * Returns error, or zero. */ -void +static int xprt_request_prepare(struct rpc_rqst *req) { struct rpc_xprt *xprt = req->rq_xprt; if (xprt->ops->prepare_request) - xprt->ops->prepare_request(req); + return xprt->ops->prepare_request(req); + return 0; } /** diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 78af7518f263..9b75891b3cc0 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -822,11 +822,11 @@ static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait) return ret; } -static void +static int xs_stream_prepare_request(struct rpc_rqst *req) { xdr_free_bvec(&req->rq_rcv_buf); - req->rq_task->tk_status = xdr_alloc_bvec( + return xdr_alloc_bvec( &req->rq_rcv_buf, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); } -- cgit v1.2.3-71-gd317 From 7968778914e53788a01c2dee2692cab157de9ac0 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 9 Mar 2022 20:50:39 +0100 Subject: PCI: Remove the deprecated "pci-dma-compat.h" API Now that all usages of the functions defined in "pci-dma-compat.h" have been removed, it is time to remove this file as well. 
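For any remaining out-of-tree users, the conversion is mechanical, since every pci_* wrapper was a one-line call into the dma_* API on &pdev->dev. A representative pair, mirroring the deleted wrappers:

/* before (compat wrapper, now removed): */
dma_addr_t handle = pci_map_single(pdev, ptr, size, PCI_DMA_TODEVICE);
/* after (direct dma-mapping API): */
dma_addr_t handle = dma_map_single(&pdev->dev, ptr, size, DMA_TO_DEVICE);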
In order not to break builds, move the "#include " that was in "pci-dma-compat.h" into "include/linux/pci.h" Signed-off-by: Christophe JAILLET Acked-by: Bjorn Helgaas Signed-off-by: Christoph Hellwig --- include/linux/pci-dma-compat.h | 129 ----------------------------------------- include/linux/pci.h | 3 +- 2 files changed, 1 insertion(+), 131 deletions(-) delete mode 100644 include/linux/pci-dma-compat.h (limited to 'include/linux') diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h deleted file mode 100644 index 249d4d7fbf18..000000000000 --- a/include/linux/pci-dma-compat.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* include this file if the platform implements the dma_ DMA Mapping API - * and wants to provide the pci_ DMA Mapping API in terms of it */ - -#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H -#define _ASM_GENERIC_PCI_DMA_COMPAT_H - -#include - -/* This defines the direction arg to the DMA mapping routines. */ -#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL -#define PCI_DMA_TODEVICE DMA_TO_DEVICE -#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE -#define PCI_DMA_NONE DMA_NONE - -static inline void * -pci_alloc_consistent(struct pci_dev *hwdev, size_t size, - dma_addr_t *dma_handle) -{ - return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); -} - -static inline void * -pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, - dma_addr_t *dma_handle) -{ - return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); -} - -static inline void -pci_free_consistent(struct pci_dev *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle); -} - -static inline dma_addr_t -pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) -{ - return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction); -} - -static inline void -pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, - size_t size, int direction) -{ - dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); -} - -static inline dma_addr_t -pci_map_page(struct pci_dev *hwdev, struct page *page, - unsigned long offset, size_t size, int direction) -{ - return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction); -} - -static inline void -pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, - size_t size, int direction) -{ - dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction); -} - -static inline int -pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, - int nents, int direction) -{ - return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); -} - -static inline void -pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, - int nents, int direction) -{ - dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, - size_t size, int direction) -{ - dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, - size_t size, int direction) -{ - dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, - int nelems, int direction) -{ - 
dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, - int nelems, int direction) -{ - dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); -} - -static inline int -pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) -{ - return dma_mapping_error(&pdev->dev, dma_addr); -} - -#ifdef CONFIG_PCI -static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) -{ - return dma_set_mask(&dev->dev, mask); -} - -static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) -{ - return dma_set_coherent_mask(&dev->dev, mask); -} -#else -static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) -{ return -EIO; } -static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) -{ return -EIO; } -#endif - -#endif diff --git a/include/linux/pci.h b/include/linux/pci.h index b957eeb89c7a..60adf42460ab 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2473,8 +2473,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); #endif -/* Provide the legacy pci_dma_* API */ -#include +#include #define pci_printk(level, pdev, fmt, arg...) \ dev_printk(level, &(pdev)->dev, fmt, ##arg) -- cgit v1.2.3-71-gd317 From c18c86808b78c4c2dc69f27f37c57abab14ee387 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 30 Mar 2022 02:22:17 -0400 Subject: Revert "virtio_config: introduce a new .enable_cbs method" This reverts commit d50497eb4e554e1f0351e1836ee7241c059592e6. The new callback ended up not being used, and it's asymmetrical: just enable, no disable. Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- include/linux/virtio_config.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index dafdc7f48c01..b341dd62aa4d 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -23,8 +23,6 @@ struct virtio_shm_region { * any of @get/@set, @get_status/@set_status, or @get_features/ * @finalize_features are NOT safe to be called from an atomic * context. 
- * @enable_cbs: enable the callbacks - * vdev: the virtio_device * @get: read the value of a configuration field * vdev: the virtio_device * offset: the offset of the configuration field @@ -78,7 +76,6 @@ struct virtio_shm_region { */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { - void (*enable_cbs)(struct virtio_device *vdev); void (*get)(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len); void (*set)(struct virtio_device *vdev, unsigned offset, @@ -233,9 +230,6 @@ void virtio_device_ready(struct virtio_device *dev) { unsigned status = dev->config->get_status(dev); - if (dev->config->enable_cbs) - dev->config->enable_cbs(dev); - BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK); dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK); } -- cgit v1.2.3-71-gd317 From 4a9c7bbe2ed4d2b240674b1fb606c41d3940c412 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Tue, 29 Mar 2022 18:14:56 -0700 Subject: bpf: Resolve to prog->aux->dst_prog->type only for BPF_PROG_TYPE_EXT The commit 7e40781cc8b7 ("bpf: verifier: Use target program's type for access verifications") fixes the verifier checking for BPF_PROG_TYPE_EXT (extension) prog such that the verifier looks for things based on the target prog type that it is extending instead of the BPF_PROG_TYPE_EXT itself. The current resolve_prog_type() returns the target prog type. It checks for nullness on prog->aux->dst_prog. However, when loading a BPF_PROG_TYPE_TRACING prog and it is tracing another bpf prog instead of a kernel function, prog->aux->dst_prog is not NULL also. In this case, the verifier should still verify as the BPF_PROG_TYPE_TRACING type instead of the traced prog type in prog->aux->dst_prog->type. An oops has been reported when tracing a struct_ops prog. A NULL dereference happened in check_return_code() when accessing the prog->aux->attach_func_proto->type and prog->aux->attach_func_proto is NULL here because the traced struct_ops prog has the "unreliable" set. This patch is to change the resolve_prog_type() to only return the target prog type if the prog being verified is BPF_PROG_TYPE_EXT. Fixes: 7e40781cc8b7 ("bpf: verifier: Use target program's type for access verifications") Signed-off-by: Martin KaFai Lau Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220330011456.2984509-1-kafai@fb.com --- include/linux/bpf_verifier.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index c1fc4af47f69..3a9d2d7cc6b7 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -570,9 +570,11 @@ static inline u32 type_flag(u32 type) return type & ~BPF_BASE_TYPE_MASK; } +/* only use after check_attach_btf_id() */ static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) { - return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type; + return prog->type == BPF_PROG_TYPE_EXT ? + prog->aux->dst_prog->type : prog->type; } #endif /* _LINUX_BPF_VERIFIER_H */ -- cgit v1.2.3-71-gd317 From 7dd5ad2d3e82fb55229e3fe18e09160878e77e20 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 31 Mar 2022 10:36:55 +0200 Subject: Revert "signal, x86: Delay calling signals in atomic on RT enabled kernels" Revert commit bf9ad37dc8a. It needs to be better encapsulated and generalized. Signed-off-by: Thomas Gleixner Cc: "Eric W. 
Biederman" Cc: Oleg Nesterov Cc: Sebastian Andrzej Siewior --- arch/x86/Kconfig | 1 - include/linux/sched.h | 3 --- kernel/Kconfig.preempt | 12 +----------- kernel/entry/common.c | 14 -------------- kernel/signal.c | 40 ---------------------------------------- 5 files changed, 1 insertion(+), 69 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7340d9f01b62..442a426e8a68 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -122,7 +122,6 @@ config X86 select ARCH_WANT_GENERAL_HUGETLB select ARCH_WANT_HUGE_PMD_SHARE select ARCH_WANT_LD_ORPHAN_WARN - select ARCH_WANTS_RT_DELAYED_SIGNALS select ARCH_WANTS_THP_SWAP if X86_64 select ARCH_HAS_PARANOID_L1D_FLUSH select BUILDTIME_TABLE_SORT diff --git a/include/linux/sched.h b/include/linux/sched.h index 4a6fdd2a679f..d5e3c00b74e1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1090,9 +1090,6 @@ struct task_struct { /* Restored if set_restore_sigmask() was used: */ sigset_t saved_sigmask; struct sigpending pending; -#ifdef CONFIG_RT_DELAYED_SIGNALS - struct kernel_siginfo forced_info; -#endif unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 8c6de5a9ecc4..c2f1fd95a821 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -133,14 +133,4 @@ config SCHED_CORE which is the likely usage by Linux distributions, there should be no measurable impact on performance. -config ARCH_WANTS_RT_DELAYED_SIGNALS - bool - help - This option is selected by architectures where raising signals - can happen in atomic contexts on PREEMPT_RT enabled kernels. This - option delays raising the signal until the return to user space - loop where it is also delivered. X86 requires this to deliver - signals from trap handlers which run on IST stacks. - -config RT_DELAYED_SIGNALS - def_bool PREEMPT_RT && ARCH_WANTS_RT_DELAYED_SIGNALS + diff --git a/kernel/entry/common.c b/kernel/entry/common.c index ef8d94a98b7e..e57a224d6b79 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -142,18 +142,6 @@ void noinstr exit_to_user_mode(void) /* Workaround to allow gradual conversion of architecture code */ void __weak arch_do_signal_or_restart(struct pt_regs *regs) { } -#ifdef CONFIG_RT_DELAYED_SIGNALS -static inline void raise_delayed_signal(void) -{ - if (unlikely(current->forced_info.si_signo)) { - force_sig_info(¤t->forced_info); - current->forced_info.si_signo = 0; - } -} -#else -static inline void raise_delayed_signal(void) { } -#endif - static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work) { @@ -168,8 +156,6 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, if (ti_work & _TIF_NEED_RESCHED) schedule(); - raise_delayed_signal(); - if (ti_work & _TIF_UPROBE) uprobe_notify_resume(regs); diff --git a/kernel/signal.c b/kernel/signal.c index 368a34c25bbf..30cd1ca43bcd 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1307,43 +1307,6 @@ enum sig_handler { HANDLER_EXIT, /* Only visible as the process exit code */ }; -/* - * On some archictectures, PREEMPT_RT has to delay sending a signal from a - * trap since it cannot enable preemption, and the signal code's - * spin_locks turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME - * which will send the signal on exit of the trap. 
- */ -#ifdef CONFIG_RT_DELAYED_SIGNALS -static inline bool force_sig_delayed(struct kernel_siginfo *info, - struct task_struct *t) -{ - if (!in_atomic()) - return false; - - if (WARN_ON_ONCE(t->forced_info.si_signo)) - return true; - - if (is_si_special(info)) { - WARN_ON_ONCE(info != SEND_SIG_PRIV); - t->forced_info.si_signo = info->si_signo; - t->forced_info.si_errno = 0; - t->forced_info.si_code = SI_KERNEL; - t->forced_info.si_pid = 0; - t->forced_info.si_uid = 0; - } else { - t->forced_info = *info; - } - set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); - return true; -} -#else -static inline bool force_sig_delayed(struct kernel_siginfo *info, - struct task_struct *t) -{ - return false; -} -#endif - /* * Force a signal that the process can't ignore: if necessary * we unblock the signal and change any SIG_IGN to SIG_DFL. @@ -1364,9 +1327,6 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, struct k_sigaction *action; int sig = info->si_signo; - if (force_sig_delayed(info, t)) - return 0; - spin_lock_irqsave(&t->sighand->siglock, flags); action = &t->sighand->action[sig-1]; ignored = action->sa.sa_handler == SIG_IGN; -- cgit v1.2.3-71-gd317 From 48ec13d36d3ff716a8a08e6583a925def7a2564d Mon Sep 17 00:00:00 2001 From: Joey Gouly Date: Fri, 18 Mar 2022 12:12:33 +0000 Subject: gpio: Properly document parent data union Suppress a warning in the html docs by documenting these fields separately. Signed-off-by: Joey Gouly Link: https://lore.kernel.org/lkml/20211027220118.71a229ab@canb.auug.org.au/ Cc: Linus Walleij Cc: Bartosz Golaszewski Cc: Marc Zyngier Cc: Stephen Rothwell Reviewed-by: Linus Walleij Signed-off-by: Bartosz Golaszewski --- include/linux/gpio/driver.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index b0728c8ad90c..98c93510640e 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -168,13 +168,16 @@ struct gpio_irq_chip { /** * @parent_handler_data: + * + * If @per_parent_data is false, @parent_handler_data is a single + * pointer used as the data associated with every parent interrupt. + * * @parent_handler_data_array: * - * Data associated, and passed to, the handler for the parent - * interrupt. Can either be a single pointer if @per_parent_data - * is false, or an array of @num_parents pointers otherwise. If - * @per_parent_data is true, @parent_handler_data_array cannot be - * NULL. + * If @per_parent_data is true, @parent_handler_data_array is + * an array of @num_parents pointers, and is used to associate + * different data for each parent. This cannot be NULL if + * @per_parent_data is true. */ union { void *parent_handler_data; -- cgit v1.2.3-71-gd317 From 229a08a4f4e4f9949801cc39b6480ddc9c487183 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 9 Mar 2022 09:37:31 -0800 Subject: ARM/dma-mapping: Remove CMA code when not built with CMA The MAX_CMA_AREAS could be set to 0, which would result in code that would attempt to operate beyond the end of a zero-sized array. If CONFIG_CMA is disabled, just remove this code entirely. Found when building arm on GCC 10.x for several defconfigs (e.g. 
axm55xx_defconfig) under -Warray-bounds: arch/arm/mm/dma-mapping.c:396:22: warning: array subscript is outside array bounds of 'struct dma_contig_early_reserve[0]' [-Warray-bounds] 396 | dma_mmu_remap[dma_mmu_remap_num].size = size; | ~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~ arch/arm/mm/dma-mapping.c:389:40: note: while referencing 'dma_mmu_remap' 389 | static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata; | ^~~~~~~~~~~~~ Cc: Russell King Cc: Logan Gunthorpe Cc: Martin Oliveira Cc: David Hildenbrand Cc: Andrew Morton Cc: Stephen Rothwell Cc: Zi Yan Cc: Hari Bathini Cc: Minchan Kim Cc: Mike Kravetz Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/all/6243ee60.1c69fb81.16de6.7dbf@mx.google.com/ Signed-off-by: Kees Cook Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/lkml/20220310070041.GA24874@lst.de Reviewed-by: David Hildenbrand Link: https://lore.kernel.org/lkml/9059fa71-330f-f04f-b155-2850abb72a71@redhat.com --- arch/arm/mm/dma-mapping.c | 2 ++ arch/arm/mm/mm.h | 4 ++++ include/linux/cma.h | 4 ---- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 4b61541853ea..82ffac621854 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -381,6 +381,7 @@ out: */ postcore_initcall(atomic_pool_init); +#ifdef CONFIG_CMA_AREAS struct dma_contig_early_reserve { phys_addr_t base; unsigned long size; @@ -435,6 +436,7 @@ void __init dma_contiguous_remap(void) iotable_init(&map, 1); } } +#endif static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data) { diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 9ff683612f2a..d7ffccb7fea7 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -88,6 +88,10 @@ extern phys_addr_t arm_lowmem_limit; void __init bootmem_init(void); void arm_mm_memblock_reserve(void); +#ifdef CONFIG_CMA_AREAS void dma_contiguous_remap(void); +#else +static inline void dma_contiguous_remap(void) { } +#endif unsigned long __clear_cr(unsigned long mask); diff --git a/include/linux/cma.h b/include/linux/cma.h index bd801023504b..2c2ede7f0724 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -12,10 +12,6 @@ */ #ifdef CONFIG_CMA_AREAS #define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS) - -#else -#define MAX_CMA_AREAS (0) - #endif #define CMA_MAX_NAME 64 -- cgit v1.2.3-71-gd317 From aad5b23ebf21573a32b6f07644f028d64492a5d6 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 28 Mar 2022 12:34:31 -0400 Subject: dm: fix dm_io and dm_target_io flags race condition on Alpha Early alpha processors cannot write a single byte or short; they read 8 bytes, modify the value in registers and write back 8 bytes. This could cause a race condition in the structure dm_io if the fields flags and io_count are modified simultaneously. Fix this bug by using 32-bit flags if we are on Alpha and if we are compiling for a processor that doesn't have the byte-word-extension. Signed-off-by: Mikulas Patocka Fixes: bd4a6dd241ae ("dm: reduce size of dm_io and dm_target_io structs") [snitzer: Jens allowed this change since Mikulas owns a relevant Alpha!]
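To make the hazard concrete, here is a sketch of how the old layout goes wrong on a pre-BWX Alpha (offsets follow from the struct definition before this patch; the interleaving shown is illustrative):

struct dm_io {			/* old layout: all three fields share */
	unsigned short magic;	/* one aligned 8-byte word: bytes 0-1, */
	unsigned short flags;	/* bytes 2-3, */
	atomic_t io_count;	/* and bytes 4-7 */
	/* ... */
};

/* CPU0 sets a bit in io->flags while CPU1 runs atomic_dec(&io->io_count).
 * Without the byte-word extension, the 16-bit store to ->flags compiles
 * to: load the containing 64-bit word, merge the new flags bytes in a
 * register, store all 8 bytes back. If CPU1's decrement lands between
 * CPU0's load and store, the store writes back the stale io_count and
 * the decrement is lost. Widening flags to u32 (blk_short_t) turns the
 * update into a native 32-bit access that touches only ->flags. */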
Acked-by: Jens Axboe Signed-off-by: Mike Snitzer --- drivers/md/dm-core.h | 4 ++-- include/linux/blk_types.h | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 4081cb6cf7b3..4277853c7535 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -210,7 +210,7 @@ struct dm_table { #define DM_TIO_MAGIC 28714 struct dm_target_io { unsigned short magic; - unsigned short flags; + blk_short_t flags; unsigned int target_bio_nr; struct dm_io *io; struct dm_target *ti; @@ -244,7 +244,7 @@ static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit) #define DM_IO_MAGIC 19577 struct dm_io { unsigned short magic; - unsigned short flags; + blk_short_t flags; atomic_t io_count; struct mapped_device *md; struct bio *orig_bio; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index dd0763a1c674..1973ef9bd40f 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -85,8 +85,10 @@ struct block_device { */ #if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) typedef u32 __bitwise blk_status_t; +typedef u32 blk_short_t; #else typedef u8 __bitwise blk_status_t; +typedef u16 blk_short_t; #endif #define BLK_STS_OK 0 #define BLK_STS_NOTSUPP ((__force blk_status_t)1) -- cgit v1.2.3-71-gd317 From ebf921a9fac38560e0fc3a4381e163a6969efd5a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 22 Jan 2022 15:46:22 -0500 Subject: readahead: Remove read_cache_pages() With no remaining users, remove this function and the related infrastructure. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Al Viro Acked-by: Al Viro --- include/linux/pagemap.h | 2 -- mm/readahead.c | 76 ------------------------------------------------- 2 files changed, 78 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a8d0b327b066..993994cd943a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -752,8 +752,6 @@ struct page *read_cache_page(struct address_space *, pgoff_t index, filler_t *filler, void *data); extern struct page * read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); -extern int read_cache_pages(struct address_space *mapping, - struct list_head *pages, filler_t *filler, void *data); static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, struct file *file) diff --git a/mm/readahead.c b/mm/readahead.c index d3a47546d17d..9097af639beb 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -142,82 +142,6 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) } EXPORT_SYMBOL_GPL(file_ra_state_init); -/* - * see if a page needs releasing upon read_cache_pages() failure - * - the caller of read_cache_pages() may have set PG_private or PG_fscache - * before calling, such as the NFS fs marking pages that are cached locally - * on disk, thus we need to give the fs a chance to clean up in the event of - * an error - */ -static void read_cache_pages_invalidate_page(struct address_space *mapping, - struct page *page) -{ - if (page_has_private(page)) { - if (!trylock_page(page)) - BUG(); - page->mapping = mapping; - folio_invalidate(page_folio(page), 0, PAGE_SIZE); - page->mapping = NULL; - unlock_page(page); - } - put_page(page); -} - -/* - * release a list of pages, invalidating them first if need be - */ -static void read_cache_pages_invalidate_pages(struct address_space *mapping, - 
struct list_head *pages) -{ - struct page *victim; - - while (!list_empty(pages)) { - victim = lru_to_page(pages); - list_del(&victim->lru); - read_cache_pages_invalidate_page(mapping, victim); - } -} - -/** - * read_cache_pages - populate an address space with some pages & start reads against them - * @mapping: the address_space - * @pages: The address of a list_head which contains the target pages. These - * pages have their ->index populated and are otherwise uninitialised. - * @filler: callback routine for filling a single page. - * @data: private data for the callback routine. - * - * Hides the details of the LRU cache etc from the filesystems. - * - * Returns: %0 on success, error return by @filler otherwise - */ -int read_cache_pages(struct address_space *mapping, struct list_head *pages, - int (*filler)(void *, struct page *), void *data) -{ - struct page *page; - int ret = 0; - - while (!list_empty(pages)) { - page = lru_to_page(pages); - list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, page->index, - readahead_gfp_mask(mapping))) { - read_cache_pages_invalidate_page(mapping, page); - continue; - } - put_page(page); - - ret = filler(data, page); - if (unlikely(ret)) { - read_cache_pages_invalidate_pages(mapping, pages); - break; - } - task_io_account_read(PAGE_SIZE); - } - return ret; -} - -EXPORT_SYMBOL(read_cache_pages); - static void read_pages(struct readahead_control *rac, struct list_head *pages, bool skip_page) { -- cgit v1.2.3-71-gd317 From 704528d895dd3e7b173e672116b4eb2b0a0fceb0 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 23 Mar 2022 21:29:04 -0400 Subject: fs: Remove ->readpages address space operation All filesystems have now been converted to use ->readahead, so remove the ->readpages operation and fix all the comments that used to refer to it. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Al Viro Acked-by: Al Viro --- Documentation/filesystems/fsverity.rst | 6 +++--- Documentation/filesystems/locking.rst | 6 ------ Documentation/filesystems/vfs.rst | 11 ----------- fs/btrfs/reflink.c | 4 ++-- fs/cifs/cifssmb.c | 2 +- fs/cifs/inode.c | 2 +- fs/crypto/crypto.c | 2 +- fs/ext4/readpage.c | 2 +- fs/f2fs/data.c | 4 ++-- fs/fuse/fuse_i.h | 2 +- fs/verity/verify.c | 4 ++-- include/linux/fs.h | 6 ------ include/linux/fsverity.h | 2 +- mm/filemap.c | 2 +- mm/readahead.c | 15 ++------------- 15 files changed, 18 insertions(+), 52 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst index 1d831e3cbcb3..8cc536d08f51 100644 --- a/Documentation/filesystems/fsverity.rst +++ b/Documentation/filesystems/fsverity.rst @@ -549,7 +549,7 @@ Pagecache ~~~~~~~~~ For filesystems using Linux's pagecache, the ``->readpage()`` and -``->readpages()`` methods must be modified to verify pages before they +``->readahead()`` methods must be modified to verify pages before they are marked Uptodate. Merely hooking ``->read_iter()`` would be insufficient, since ``->read_iter()`` is not used for memory maps. @@ -611,7 +611,7 @@ workqueue, and then the workqueue work does the decryption or verification. Finally, pages where no decryption or verity error occurred are marked Uptodate, and the pages are unlocked. -Files on ext4 and f2fs may contain holes. Normally, ``->readpages()`` +Files on ext4 and f2fs may contain holes. Normally, ``->readahead()`` simply zeroes holes and sets the corresponding pages Uptodate; no bios are issued. 
To prevent this case from bypassing fs-verity, these filesystems use fsverity_verify_page() to verify hole pages. @@ -778,7 +778,7 @@ weren't already directly answered in other parts of this document. - To prevent bypassing verification, pages must not be marked Uptodate until they've been verified. Currently, each filesystem is responsible for marking pages Uptodate via - ``->readpages()``. Therefore, currently it's not possible for + ``->readahead()``. Therefore, currently it's not possible for the VFS to do the verification on its own. Changing this would require significant changes to the VFS and all filesystems. diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 2998cec9af4b..c26d854275a0 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -241,8 +241,6 @@ prototypes:: int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *folio); void (*readahead)(struct readahead_control *); - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); @@ -274,7 +272,6 @@ readpage: yes, unlocks shared writepages: dirty_folio maybe readahead: yes, unlocks shared -readpages: no shared write_begin: locks the page exclusive write_end: yes, unlocks exclusive bmap: @@ -300,9 +297,6 @@ completion. ->readahead() unlocks the pages that I/O is attempted on like ->readpage(). -->readpages() populates the pagecache with the passed pages and starts -I/O against them. They come unlocked upon I/O completion. - ->writepage() is used for two purposes: for "memory cleansing" and for "sync". These are quite different operations and the behaviour may differ depending upon the mode. diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 4f14edf93941..794bd1a66bfb 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -726,8 +726,6 @@ cache in your filesystem. The following members are defined: int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *); void (*readahead)(struct readahead_control *); - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); @@ -817,15 +815,6 @@ cache in your filesystem. The following members are defined: completes successfully. Setting PageError on any page will be ignored; simply unlock the page if an I/O error occurs. -``readpages`` - called by the VM to read pages associated with the address_space - object. This is essentially just a vector version of readpage. - Instead of just one page, several pages are requested. - readpages is only used for read-ahead, so read errors are - ignored. If anything goes wrong, feel free to give up. - This interface is deprecated and will be removed by the end of - 2020; implement readahead instead. - ``write_begin`` Called by the generic buffered write code to ask the filesystem to prepare to write len bytes at the given offset in the file. 
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index 04a88bfe4fcf..998e3f180d90 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -645,7 +645,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, int ret; /* - * Lock destination range to serialize with concurrent readpages() and + * Lock destination range to serialize with concurrent readahead() and * source range to serialize with relocation. */ btrfs_double_extent_lock(src, loff, dst, dst_loff, len); @@ -739,7 +739,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, } /* - * Lock destination range to serialize with concurrent readpages() and + * Lock destination range to serialize with concurrent readahead() and * source range to serialize with relocation. */ btrfs_double_extent_lock(src, off, inode, destoff, len); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 071e2f21a7db..bc3ded4f34f6 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -597,7 +597,7 @@ CIFSSMBNegotiate(const unsigned int xid, set_credits(server, server->maxReq); /* probably no need to store and check maxvcs */ server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize); - /* set up max_read for readpages check */ + /* set up max_read for readahead check */ server->max_read = server->maxBuf; server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 60d853c92f6a..2f9e7d2f81b6 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -49,7 +49,7 @@ static void cifs_set_ops(struct inode *inode) inode->i_fop = &cifs_file_ops; } - /* check if server can support readpages */ + /* check if server can support readahead */ if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read < PAGE_SIZE + MAX_CIFS_HDR_SIZE) inode->i_data.a_ops = &cifs_addr_ops_smallbuf; diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 4fcca79f39ae..526a4c1bed99 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -248,7 +248,7 @@ EXPORT_SYMBOL(fscrypt_encrypt_block_inplace); * which must still be locked and not uptodate. Normally, blocksize == * PAGE_SIZE and the whole page is decrypted at once. * - * This is for use by the filesystem's ->readpages() method. + * This is for use by the filesystem's ->readahead() method. * * Return: 0 on success; -errno on failure */ diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 1aa26d6634fc..af491e170c4a 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -109,7 +109,7 @@ static void verity_work(struct work_struct *work) struct bio *bio = ctx->bio; /* - * fsverity_verify_bio() may call readpages() again, and although verity + * fsverity_verify_bio() may call readahead() again, and although verity * will be disabled for that, decryption may still be needed, causing * another bio_post_read_ctx to be allocated. So to guarantee that * mempool_alloc() never deadlocks we must free the current ctx first. diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index f8fcbe91059b..c92920c8661d 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -164,7 +164,7 @@ static void f2fs_verify_bio(struct work_struct *work) bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS); /* - * fsverity_verify_bio() may call readpages() again, and while verity + * fsverity_verify_bio() may call readahead() again, and while verity * will be disabled for this, decryption and/or decompression may still * be needed, resulting in another bio_post_read_ctx being allocated. 
* So to prevent deadlocks we need to release the current ctx to the @@ -2392,7 +2392,7 @@ static void f2fs_readahead(struct readahead_control *rac) if (!f2fs_is_compress_backend_ready(inode)) return; - /* If the file has inline data, skip readpages */ + /* If the file has inline data, skip readahead */ if (f2fs_has_inline_data(inode)) return; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index eac4984cc753..488b460e046f 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -627,7 +627,7 @@ struct fuse_conn { /** Connection successful. Only set in INIT */ unsigned conn_init:1; - /** Do readpages asynchronously? Only set in INIT */ + /** Do readahead asynchronously? Only set in INIT */ unsigned async_read:1; /** Return an unique read error after abort. Only set in INIT */ diff --git a/fs/verity/verify.c b/fs/verity/verify.c index 0adb970f4e73..14e2fb49cff5 100644 --- a/fs/verity/verify.c +++ b/fs/verity/verify.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Data verification functions, i.e. hooks for ->readpages() + * Data verification functions, i.e. hooks for ->readahead() * * Copyright 2019 Google LLC */ @@ -214,7 +214,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page); * that fail verification are set to the Error state. Verification is skipped * for pages already in the Error state, e.g. due to fscrypt decryption failure. * - * This is a helper function for use by the ->readpages() method of filesystems + * This is a helper function for use by the ->readahead() method of filesystems * that issue bios to read data directly into the page cache. Filesystems that * populate the page cache without issuing bios (e.g. non block-based * filesystems) must instead call fsverity_verify_page() directly on each page. diff --git a/include/linux/fs.h b/include/linux/fs.h index 183160872133..7c81887cc7e8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -370,12 +370,6 @@ struct address_space_operations { /* Mark a folio dirty. Return true if this dirtied it */ bool (*dirty_folio)(struct address_space *, struct folio *); - /* - * Reads in the requested pages. Unlike ->readpage(), this is - * PURELY used for read-ahead!. - */ - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index b568b3c7d095..a7afc800bd8d 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -221,7 +221,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) * * This checks whether ->i_verity_info has been set. * - * Filesystems call this from ->readpages() to check whether the pages need to + * Filesystems call this from ->readahead() to check whether the pages need to * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to * a race condition where the file is being read concurrently with * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) diff --git a/mm/filemap.c b/mm/filemap.c index 647d72bf23b6..d904cd7e4181 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2538,7 +2538,7 @@ static int filemap_create_folio(struct file *file, * the page cache as the locked folio would then be enough to * synchronize with hole punching. 
But there are code paths * such as filemap_update_page() filling in partially uptodate - * pages or ->readpages() that need to hold invalidate_lock + * pages or ->readahead() that need to hold invalidate_lock * while mapping blocks for IO so let's hold the lock here as * well to keep locking rules simple. */ diff --git a/mm/readahead.c b/mm/readahead.c index 9097af639beb..297bd0719cda 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -170,13 +170,6 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages, unlock_page(page); put_page(page); } - } else if (aops->readpages) { - aops->readpages(rac->file, rac->mapping, pages, - readahead_count(rac)); - /* Clean up the remaining pages */ - put_pages_list(pages); - rac->_index += rac->_nr_pages; - rac->_nr_pages = 0; } else { while ((page = readahead_page(rac))) { aops->readpage(rac->file, page); @@ -253,10 +246,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, folio = filemap_alloc_folio(gfp_mask, 0); if (!folio) break; - if (mapping->a_ops->readpages) { - folio->index = index + i; - list_add(&folio->lru, &page_pool); - } else if (filemap_add_folio(mapping, folio, index + i, + if (filemap_add_folio(mapping, folio, index + i, gfp_mask) < 0) { folio_put(folio); read_pages(ractl, &page_pool, true); @@ -318,8 +308,7 @@ void force_page_cache_ra(struct readahead_control *ractl, struct backing_dev_info *bdi = inode_to_bdi(mapping->host); unsigned long max_pages, index; - if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages && - !mapping->a_ops->readahead)) + if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead)) return; /* -- cgit v1.2.3-71-gd317 From a9fcd89d67bb8c4ad613b54ab691fc603c94a03a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 14 Feb 2022 09:13:43 -0500 Subject: fs: Remove read_actor_t This typedef is not used any more. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Al Viro Acked-by: Al Viro --- include/linux/fs.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 7c81887cc7e8..7588d3a0ced8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -357,9 +357,6 @@ typedef struct { int error; } read_descriptor_t; -typedef int (*read_actor_t)(read_descriptor_t *, struct page *, - unsigned long, unsigned long); - struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); -- cgit v1.2.3-71-gd317 From b2403a61308533c576c9dd783fcb73a9186e0b37 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 14 Feb 2022 09:15:34 -0500 Subject: fs, net: Move read_descriptor_t to net.h fs.h has no more need for this typedef; networking is now the sole user of the read_descriptor_t. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Al Viro Acked-by: Al Viro --- include/linux/fs.h | 19 ------------------- include/linux/net.h | 19 +++++++++++++++++++ 2 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 7588d3a0ced8..8ff28939de60 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -338,25 +338,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb) return kiocb->ki_complete == NULL; } -/* - * "descriptor" for what we're up to with a read. 
- * This allows us to use the same read code yet - * have multiple different users of the data that - * we read from a file. - * - * The simplest case just copies the data to user - * mode. - */ -typedef struct { - size_t written; - size_t count; - union { - char __user *buf; - void *data; - } arg; - int error; -} read_descriptor_t; - struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); diff --git a/include/linux/net.h b/include/linux/net.h index ba736b457a06..12093f4db50c 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -125,6 +125,25 @@ struct socket { struct socket_wq wq; }; +/* + * "descriptor" for what we're up to with a read. + * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode. + */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + struct vm_area_struct; struct page; struct sockaddr; -- cgit v1.2.3-71-gd317 From 800ba29547e16d5fbe67ca764ba660e049e9f1bf Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 19 Feb 2022 23:19:49 -0500 Subject: fs: Pass an iocb to generic_perform_write() We can extract both the file pointer and the pos from the iocb. This simplifies each caller as well as allowing generic_perform_write() to see more of the iocb contents in the future. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Christian Brauner Reviewed-by: Al Viro Acked-by: Al Viro --- fs/ceph/file.c | 2 +- fs/ext4/file.c | 2 +- fs/f2fs/file.c | 2 +- fs/nfs/file.c | 2 +- include/linux/fs.h | 2 +- mm/filemap.c | 10 ++++++---- 6 files changed, 11 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index feb75eb1cd82..6c9e837aa1d3 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1869,7 +1869,7 @@ retry_snap: * are pending vmtruncate. 
So write and vmtruncate * can not run at the same time */ - written = generic_perform_write(file, from, pos); + written = generic_perform_write(iocb, from); if (likely(written >= 0)) iocb->ki_pos = pos + written; ceph_end_io_write(inode); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 8bd66cdc41be..6feb07e3e1eb 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -267,7 +267,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, goto out; current->backing_dev_info = inode_to_bdi(inode); - ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos); + ret = generic_perform_write(iocb, from); current->backing_dev_info = NULL; out: diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index d3f39a704b8b..5b89af0f27f0 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -4448,7 +4448,7 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb, return -EOPNOTSUPP; current->backing_dev_info = inode_to_bdi(inode); - ret = generic_perform_write(file, from, iocb->ki_pos); + ret = generic_perform_write(iocb, from); current->backing_dev_info = NULL; if (ret > 0) { diff --git a/fs/nfs/file.c b/fs/nfs/file.c index b0ca244c50d0..150b7fa8f0a7 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -646,7 +646,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) result = generic_write_checks(iocb, from); if (result > 0) { current->backing_dev_info = inode_to_bdi(inode); - result = generic_perform_write(file, from, iocb->ki_pos); + result = generic_perform_write(iocb, from); current->backing_dev_info = NULL; } nfs_end_io_write(inode); diff --git a/include/linux/fs.h b/include/linux/fs.h index 8ff28939de60..468dc7ec821f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2999,7 +2999,7 @@ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); -extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); +ssize_t generic_perform_write(struct kiocb *, struct iov_iter *); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); diff --git a/mm/filemap.c b/mm/filemap.c index d904cd7e4181..3a5ffb5587cd 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3752,9 +3752,10 @@ out: } EXPORT_SYMBOL(generic_file_direct_write); -ssize_t generic_perform_write(struct file *file, - struct iov_iter *i, loff_t pos) +ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) { + struct file *file = iocb->ki_filp; + loff_t pos = iocb->ki_pos; struct address_space *mapping = file->f_mapping; const struct address_space_operations *a_ops = mapping->a_ops; long status = 0; @@ -3884,7 +3885,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) goto out; - status = generic_perform_write(file, from, pos = iocb->ki_pos); + pos = iocb->ki_pos; + status = generic_perform_write(iocb, from); /* * If generic_perform_write() returned a synchronous error * then we want to return the number of bytes which were @@ -3916,7 +3918,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) */ } } else { - written = generic_perform_write(file, from, iocb->ki_pos); + written = generic_perform_write(iocb, from); if (likely(written > 0)) iocb->ki_pos += written; } -- cgit v1.2.3-71-gd317 From 
d7414ba14a3a67f81321069219dc7dbc095022c3 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 20 Feb 2022 22:28:03 -0500 Subject: filemap: Remove AOP_FLAG_CONT_EXPAND This flag is no longer used, so remove it. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Al Viro Acked-by: Al Viro --- fs/buffer.c | 3 +-- include/linux/fs.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/buffer.c b/fs/buffer.c index d67fbe063a3a..2b5561ae5d0b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2352,8 +2352,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size) if (err) goto out; - err = pagecache_write_begin(NULL, mapping, size, 0, - AOP_FLAG_CONT_EXPAND, &page, &fsdata); + err = pagecache_write_begin(NULL, mapping, size, 0, 0, &page, &fsdata); if (err) goto out; diff --git a/include/linux/fs.h b/include/linux/fs.h index 468dc7ec821f..bbde95387a23 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -275,7 +275,6 @@ enum positive_aop_returns { AOP_TRUNCATED_PAGE = 0x80001, }; -#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */ #define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct * helper code (eg buffer layer) * to clear GFP_FS from alloc */ -- cgit v1.2.3-71-gd317 From ada543af3bfe3d953986eca118601b9612382c13 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Fri, 1 Apr 2022 11:28:45 -0700 Subject: mm, kasan: fix __GFP_BITS_SHIFT definition breaking LOCKDEP KASAN changes that added new GFP flags mistakenly updated __GFP_BITS_SHIFT as the total number of GFP bits instead of as a shift used to define __GFP_BITS_MASK. This broke LOCKDEP, as __GFP_BITS_MASK now gets the 25th bit enabled instead of the 28th for __GFP_NOLOCKDEP. Update __GFP_BITS_SHIFT to always count KASAN GFP bits. In the future, we could handle all combinations of KASAN and LOCKDEP to occupy as few bits as possible. For now, we have enough GFP bits to be inefficient in this quick fix. 
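To make the breakage concrete, here is the shift arithmetic worked out for CONFIG_KASAN_HW_TAGS=n and CONFIG_LOCKDEP=y, taking ___GFP_NOLOCKDEP as bit 27 (the "28th" bit referenced above):

/*
 * old: __GFP_BITS_SHIFT = 24 + 3*0 + 1 = 25
 *      __GFP_BITS_MASK  = (1 << 25) - 1   -> bit 27 falls outside the mask
 * new: __GFP_BITS_SHIFT = 27 + 1 = 28
 *      __GFP_BITS_MASK  = (1 << 28) - 1   -> bit 27 is covered again
 */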
Link: https://lkml.kernel.org/r/462ff52742a1fcc95a69778685737f723ee4dfb3.1648400273.git.andreyknvl@google.com Fixes: 9353ffa6e9e9 ("kasan, page_alloc: allow skipping memory init for HW_TAGS") Fixes: 53ae233c30a6 ("kasan, page_alloc: allow skipping unpoisoning for HW_TAGS") Fixes: f49d9c5bb15c ("kasan, mm: only define ___GFP_SKIP_KASAN_POISON with HW_TAGS") Signed-off-by: Andrey Konovalov Reported-by: Sebastian Andrzej Siewior Tested-by: Sebastian Andrzej Siewior Acked-by: Vlastimil Babka Cc: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Andrey Ryabinin Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0fa17fb85de5..761f8f1885c7 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -264,9 +264,7 @@ struct vm_area_struct; #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (24 + \ - 3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) + \ - IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP)) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** -- cgit v1.2.3-71-gd317 From df06dae3f2a8bfb83683abf88d3dcde23fc8093d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 23 Feb 2022 16:53:02 +0000 Subject: KVM: Don't actually set a request when evicting vCPUs for GFN cache invd Don't actually set a request bit in vcpu->requests when making a request purely to force a vCPU to exit the guest. Logging a request but not actually consuming it would cause the vCPU to get stuck in an infinite loop during KVM_RUN because KVM would see the pending request and bail from VM-Enter to service the request. Note, it's currently impossible for KVM to set KVM_REQ_GPC_INVALIDATE as nothing in KVM is wired up to set guest_uses_pa=true. But, it'd be all too easy for arch code to introduce use of kvm_gfn_to_pfn_cache_init() without implementing handling of the request, especially since getting test coverage of MMU notifier interaction with specific KVM features usually requires a directed test. Opportunistically rename gfn_to_pfn_cache_invalidate_start()'s wake_vcpus to evict_vcpus. The purpose of the request is to get vCPUs out of guest mode; it's supposed to _avoid_ waking vCPUs that are blocking. Opportunistically rename KVM_REQ_GPC_INVALIDATE to be more specific as to what it wants to accomplish, and to genericize the name so that it can be used for similar but unrelated scenarios, should they arise in the future. Add a comment and documentation to explain why the "no action" request exists. Add compile-time assertions to help detect improper usage. Use the inner assertless helper in the one s390 path that makes requests without a hardcoded request. 
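As a hedged illustration (not code from this patch), a caller that needs every vCPU out of guest mode before touching shared state could use the existing kvm_make_all_cpus_request() helper with the new request:

	/*
	 * KVM_REQ_OUTSIDE_GUEST_MODE carries KVM_REQUEST_NO_ACTION, so
	 * nothing is logged in vcpu->requests and no vCPU can get stuck
	 * re-servicing it; with KVM_REQUEST_WAIT set, the call returns
	 * only once every vCPU has fully exited guest mode.
	 */
	kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);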
Cc: David Woodhouse Signed-off-by: Sean Christopherson Message-Id: <20220223165302.3205276-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/vcpu-requests.rst | 10 +++++++++ arch/s390/kvm/kvm-s390.c | 2 +- include/linux/kvm_host.h | 38 ++++++++++++++++++++++++++------ virt/kvm/kvm_main.c | 3 ++- virt/kvm/pfncache.c | 18 +++++++++------ 5 files changed, 55 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst index b61d48aec36c..db43ee571f5a 100644 --- a/Documentation/virt/kvm/vcpu-requests.rst +++ b/Documentation/virt/kvm/vcpu-requests.rst @@ -135,6 +135,16 @@ KVM_REQ_UNHALT such as a pending signal, which does not indicate the VCPU's halt emulation should stop, and therefore does not make the request. +KVM_REQ_OUTSIDE_GUEST_MODE + + This "request" ensures the target vCPU has exited guest mode prior to the + sender of the request continuing on. No action needs be taken by the target, + and so no request is actually logged for the target. This request is similar + to a "kick", but unlike a kick it guarantees the vCPU has actually exited + guest mode. A kick only guarantees the vCPU will exit at some point in the + future, e.g. a previous kick may have started the process, but there's no + guarantee the to-be-kicked vCPU has fully exited guest mode. + KVM_REQUEST_MASK ---------------- diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ca96f84db2cc..91c0cefc94aa 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -3463,7 +3463,7 @@ void exit_sie(struct kvm_vcpu *vcpu) /* Kick a guest cpu out of SIE to process a request synchronously */ void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu) { - kvm_make_request(req, vcpu); + __kvm_make_request(req, vcpu); kvm_s390_vcpu_request(vcpu); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9536ffa0473b..678fd7914521 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -148,6 +148,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQUEST_MASK GENMASK(7,0) #define KVM_REQUEST_NO_WAKEUP BIT(8) #define KVM_REQUEST_WAIT BIT(9) +#define KVM_REQUEST_NO_ACTION BIT(10) /* * Architecture-independent vcpu->requests bit members * Bits 4-7 are reserved for more arch-independent bits. @@ -156,9 +157,18 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 -#define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 +/* + * KVM_REQ_OUTSIDE_GUEST_MODE exists is purely as way to force the vCPU to + * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick" + * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing + * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous + * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no + * guarantee the vCPU received an IPI and has actually exited guest mode. 
+ */ +#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) + #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ @@ -1222,7 +1232,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); * @vcpu: vCPU to be used for marking pages dirty and to be woken on * invalidation. * @guest_uses_pa: indicates that the resulting host physical PFN is used while - * @vcpu is IN_GUEST_MODE so invalidations should wake it. + * @vcpu is IN_GUEST_MODE; invalidations of the cache from MMU + * notifiers (but not for KVM memslot changes!) will also force + * @vcpu to exit the guest to refresh the cache. * @kernel_map: requests a kernel virtual mapping (kmap / memremap). * @gpa: guest physical address to map. * @len: sanity check; the range being access must fit a single page. @@ -1233,10 +1245,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); * -EFAULT for an untranslatable guest physical address. * * This primes a gfn_to_pfn_cache and links it into the @kvm's list for - * invalidations to be processed. Invalidation callbacks to @vcpu using - * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM - * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check() - * to ensure that the cache is valid before accessing the target page. + * invalidations to be processed. Callers are required to use + * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before + * accessing the target page. */ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, struct kvm_vcpu *vcpu, bool guest_uses_pa, @@ -1984,7 +1995,7 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) void kvm_arch_irq_routing_update(struct kvm *kvm); -static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu) { /* * Ensure the rest of the request is published to kvm_check_request's @@ -1994,6 +2005,19 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); } +static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +{ + /* + * Request that don't require vCPU action should never be logged in + * vcpu->requests. The vCPU won't clear the request, so it will stay + * logged indefinitely and prevent the vCPU from entering the guest. 
+ */ + BUILD_BUG_ON(!__builtin_constant_p(req) || + (req & KVM_REQUEST_NO_ACTION)); + + __kvm_make_request(req, vcpu); +} + static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) { return READ_ONCE(vcpu->requests); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 11267d5e4c52..70e05af5ebea 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -253,7 +253,8 @@ static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req, { int cpu; - kvm_make_request(req, vcpu); + if (likely(!(req & KVM_REQUEST_NO_ACTION))) + __kvm_make_request(req, vcpu); if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) return; diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 1621f8efd961..81b2758d0ded 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -27,7 +27,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, { DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); struct gfn_to_pfn_cache *gpc; - bool wake_vcpus = false; + bool evict_vcpus = false; spin_lock(&kvm->gpc_lock); list_for_each_entry(gpc, &kvm->gpc_list, list) { @@ -40,11 +40,11 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, /* * If a guest vCPU could be using the physical address, - * it needs to be woken. + * it needs to be forced out of guest mode. */ if (gpc->guest_uses_pa) { - if (!wake_vcpus) { - wake_vcpus = true; + if (!evict_vcpus) { + evict_vcpus = true; bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); } __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap); @@ -67,14 +67,18 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, } spin_unlock(&kvm->gpc_lock); - if (wake_vcpus) { - unsigned int req = KVM_REQ_GPC_INVALIDATE; + if (evict_vcpus) { + /* + * KVM needs to ensure the vCPU is fully out of guest context + * before allowing the invalidation to continue. + */ + unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE; bool called; /* * If the OOM reaper is active, then all vCPUs should have * been stopped already, so perform the request without - * KVM_REQUEST_WAIT and be sad if any needed to be woken. + * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd. */ if (!may_block) req &= ~KVM_REQUEST_WAIT; -- cgit v1.2.3-71-gd317 From d0d96121d03d6d9cf608d948247a9f24f5a02da9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 3 Mar 2022 15:41:11 +0000 Subject: KVM: Use enum to track if cached PFN will be used in guest and/or host Replace the guest_uses_pa and kernel_map booleans in the PFN cache code with a unified enum/bitmask. Using explicit names makes it easier to review and audit call sites. Opportunistically add a WARN to prevent passing garbage; instantiating a cache without declaring its usage is either buggy or pointless. 
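The effect at a call site is visible in the arch/x86/kvm/xen.c hunk below; side by side:

	/* before: opaque booleans (guest_uses_pa = false, kernel_map = true) */
	ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
					gpa, PAGE_SIZE, false);

	/* after: the usage is named, and bogus values can be WARNed on */
	ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
					gpa, PAGE_SIZE, false);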
Signed-off-by: Sean Christopherson Signed-off-by: David Woodhouse Signed-off-by: Paolo Bonzini Message-Id: <20220303154127.202856-2-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/xen.c | 2 +- include/linux/kvm_host.h | 16 ++++++++-------- include/linux/kvm_types.h | 10 ++++++++-- virt/kvm/pfncache.c | 14 +++++++------- 4 files changed, 24 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 4aa0f2b31665..5be1c9227105 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -39,7 +39,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) } do { - ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true, + ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa, PAGE_SIZE, false); if (ret) goto out; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 678fd7914521..be9bbc0c6200 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1231,11 +1231,12 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); * @gpc: struct gfn_to_pfn_cache object. * @vcpu: vCPU to be used for marking pages dirty and to be woken on * invalidation. - * @guest_uses_pa: indicates that the resulting host physical PFN is used while - * @vcpu is IN_GUEST_MODE; invalidations of the cache from MMU - * notifiers (but not for KVM memslot changes!) will also force - * @vcpu to exit the guest to refresh the cache. - * @kernel_map: requests a kernel virtual mapping (kmap / memremap). + * @usage: indicates if the resulting host physical PFN is used while + * the @vcpu is IN_GUEST_MODE (in which case invalidation of + * the cache from MMU notifiers---but not for KVM memslot + * changes!---will also force @vcpu to exit the guest and + * refresh the cache); and/or if the PFN used directly + * by KVM (and thus needs a kernel virtual mapping). * @gpa: guest physical address to map. * @len: sanity check; the range being access must fit a single page. * @dirty: mark the cache dirty immediately. @@ -1250,9 +1251,8 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); * accessing the target page. */ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - struct kvm_vcpu *vcpu, bool guest_uses_pa, - bool kernel_map, gpa_t gpa, unsigned long len, - bool dirty); + struct kvm_vcpu *vcpu, enum pfn_cache_usage usage, + gpa_t gpa, unsigned long len, bool dirty); /** * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache. 
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index dceac12c1ce5..784f37cbf33e 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -18,6 +18,7 @@ struct kvm_memslots; enum kvm_mr_change; +#include #include #include @@ -46,6 +47,12 @@ typedef u64 hfn_t; typedef hfn_t kvm_pfn_t; +enum pfn_cache_usage { + KVM_GUEST_USES_PFN = BIT(0), + KVM_HOST_USES_PFN = BIT(1), + KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN, +}; + struct gfn_to_hva_cache { u64 generation; gpa_t gpa; @@ -64,11 +71,10 @@ struct gfn_to_pfn_cache { rwlock_t lock; void *khva; kvm_pfn_t pfn; + enum pfn_cache_usage usage; bool active; bool valid; bool dirty; - bool kernel_map; - bool guest_uses_pa; }; #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index 81b2758d0ded..efb69c923027 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -42,7 +42,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, * If a guest vCPU could be using the physical address, * it needs to be forced out of guest mode. */ - if (gpc->guest_uses_pa) { + if (gpc->usage & KVM_GUEST_USES_PFN) { if (!evict_vcpus) { evict_vcpus = true; bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); @@ -224,7 +224,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, goto map_done; } - if (gpc->kernel_map) { + if (gpc->usage & KVM_HOST_USES_PFN) { if (new_pfn == old_pfn) { new_khva = old_khva; old_pfn = KVM_PFN_ERR_FAULT; @@ -304,10 +304,11 @@ EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap); int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - struct kvm_vcpu *vcpu, bool guest_uses_pa, - bool kernel_map, gpa_t gpa, unsigned long len, - bool dirty) + struct kvm_vcpu *vcpu, enum pfn_cache_usage usage, + gpa_t gpa, unsigned long len, bool dirty) { + WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage); + if (!gpc->active) { rwlock_init(&gpc->lock); @@ -315,8 +316,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpc->pfn = KVM_PFN_ERR_FAULT; gpc->uhva = KVM_HVA_ERR_BAD; gpc->vcpu = vcpu; - gpc->kernel_map = kernel_map; - gpc->guest_uses_pa = guest_uses_pa; + gpc->usage = usage; gpc->valid = false; gpc->active = true; -- cgit v1.2.3-71-gd317 From cf1d88b36ba7e83bdaa50bccc4c47864e8f08cbe Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Mar 2022 15:41:12 +0000 Subject: KVM: Remove dirty handling from gfn_to_pfn_cache completely It isn't OK to cache the dirty status of a page in internal structures for an indefinite period of time. Any time a vCPU exits the run loop to userspace might be its last; the VMM might do its final check of the dirty log, flush the last remaining dirty pages to the destination and complete a live migration. If we have internal 'dirty' state which doesn't get flushed until the vCPU is finally destroyed on the source after migration is complete, then we have lost data because that will escape the final copy. This problem already exists with the use of kvm_vcpu_unmap() to mark pages dirty in e.g. VMX nesting. Note that the actual Linux MM already considers the page to be dirty since we have a writeable mapping of it. This is just about the KVM dirty logging. For the nesting-style use cases (KVM_GUEST_USES_PFN) we will need to track which gfn_to_pfn_caches have been used and explicitly mark the corresponding pages dirty before returning to userspace. 
But we would have needed external tracking of that anyway, rather than walking the full list of GPCs to find those belonging to this vCPU which are dirty. So let's rely *solely* on that external tracking, and keep it simple rather than laying a tempting trap for callers to fall into. Signed-off-by: David Woodhouse Signed-off-by: Paolo Bonzini Message-Id: <20220303154127.202856-3-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 4 ++++ arch/x86/kvm/xen.c | 5 ++--- include/linux/kvm_host.h | 14 +++++--------- include/linux/kvm_types.h | 1 - virt/kvm/pfncache.c | 41 ++++++++--------------------------------- 5 files changed, 19 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 1ec05e6bfa6f..04b26c2a7159 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -5293,6 +5293,10 @@ type values: KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO Sets the guest physical address of the vcpu_info for a given vCPU. + As with the shared_info page for the VM, the corresponding page may be + dirtied at any time if event channel interrupt delivery is enabled, so + userspace should always assume that the page is dirty without relying + on dirty logging. KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO Sets the guest physical address of an additional pvclock structure diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 5be1c9227105..bf6cc25eee76 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -40,7 +40,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) do { ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN, - gpa, PAGE_SIZE, false); + gpa, PAGE_SIZE); if (ret) goto out; @@ -1025,8 +1025,7 @@ static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm break; idx = srcu_read_lock(&kvm->srcu); - rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, - PAGE_SIZE, false); + rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE); srcu_read_unlock(&kvm->srcu, idx); } while(!rc); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index be9bbc0c6200..3f9b22c4983a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1239,7 +1239,6 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); * by KVM (and thus needs a kernel virtual mapping). * @gpa: guest physical address to map. * @len: sanity check; the range being access must fit a single page. - * @dirty: mark the cache dirty immediately. * * @return: 0 for success. * -EINVAL for a mapping which would cross a page boundary. @@ -1252,7 +1251,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); */ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, struct kvm_vcpu *vcpu, enum pfn_cache_usage usage, - gpa_t gpa, unsigned long len, bool dirty); + gpa_t gpa, unsigned long len); /** * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache. @@ -1261,7 +1260,6 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * @gpc: struct gfn_to_pfn_cache object. * @gpa: current guest physical address to map. * @len: sanity check; the range being access must fit a single page. - * @dirty: mark the cache dirty immediately. * * @return: %true if the cache is still valid and the address matches. * %false if the cache is not valid. 
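To make the "external tracking" argument concrete, here is a sketch of what a future KVM_GUEST_USES_PFN user might do before each return to userspace; mark_page_dirty_in_slot(), gpa_to_gfn() and the gpc fields are real, while the tracking flag is purely hypothetical:

	if (vcpu->gpc_was_written) {	/* hypothetical per-vCPU flag */
		mark_page_dirty_in_slot(kvm, gpc->memslot,
					gpa_to_gfn(gpc->gpa));
		vcpu->gpc_was_written = false;
	}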
@@ -1283,7 +1281,6 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * @gpc: struct gfn_to_pfn_cache object. * @gpa: updated guest physical address to map. * @len: sanity check; the range being access must fit a single page. - * @dirty: mark the cache dirty immediately. * * @return: 0 for success. * -EINVAL for a mapping which would cross a page boundary. @@ -1296,7 +1293,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * with the lock still held to permit access. */ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - gpa_t gpa, unsigned long len, bool dirty); + gpa_t gpa, unsigned long len); /** * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache. @@ -1304,10 +1301,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * @kvm: pointer to kvm instance. * @gpc: struct gfn_to_pfn_cache object. * - * This unmaps the referenced page and marks it dirty, if appropriate. The - * cache is left in the invalid state but at least the mapping from GPA to - * userspace HVA will remain cached and can be reused on a subsequent - * refresh. + * This unmaps the referenced page. The cache is left in the invalid state + * but at least the mapping from GPA to userspace HVA will remain cached + * and can be reused on a subsequent refresh. */ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 784f37cbf33e..ac1ebb37a0ff 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -74,7 +74,6 @@ struct gfn_to_pfn_cache { enum pfn_cache_usage usage; bool active; bool valid; - bool dirty; }; #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c index efb69c923027..dd84676615f1 100644 --- a/virt/kvm/pfncache.c +++ b/virt/kvm/pfncache.c @@ -49,19 +49,6 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, } __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap); } - - /* - * We cannot call mark_page_dirty() from here because - * this physical CPU might not have an active vCPU - * with which to do the KVM dirty tracking. - * - * Neither is there any point in telling the kernel MM - * that the underlying page is dirty. A vCPU in guest - * mode might still be writing to it up to the point - * where we wake them a few lines further down anyway. - * - * So all the dirty marking happens on the unmap. 
- */ } write_unlock_irq(&gpc->lock); } @@ -108,8 +95,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check); -static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, - gpa_t gpa, bool dirty) +static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, gpa_t gpa) { /* Unmap the old page if it was mapped before, and release it */ if (!is_error_noslot_pfn(pfn)) { @@ -122,9 +108,7 @@ static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, #endif } - kvm_release_pfn(pfn, dirty); - if (dirty) - mark_page_dirty(kvm, gpa); + kvm_release_pfn(pfn, false); } } @@ -156,7 +140,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva) } int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - gpa_t gpa, unsigned long len, bool dirty) + gpa_t gpa, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); unsigned long page_offset = gpa & ~PAGE_MASK; @@ -164,7 +148,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, unsigned long old_uhva; gpa_t old_gpa; void *old_khva; - bool old_valid, old_dirty; + bool old_valid; int ret = 0; /* @@ -181,14 +165,12 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, old_khva = gpc->khva - offset_in_page(gpc->khva); old_uhva = gpc->uhva; old_valid = gpc->valid; - old_dirty = gpc->dirty; /* If the userspace HVA is invalid, refresh that first */ if (gpc->gpa != gpa || gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva)) { gfn_t gfn = gpa_to_gfn(gpa); - gpc->dirty = false; gpc->gpa = gpa; gpc->generation = slots->generation; gpc->memslot = __gfn_to_memslot(slots, gfn); @@ -260,14 +242,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, } out: - if (ret) - gpc->dirty = false; - else - gpc->dirty = dirty; - write_unlock_irq(&gpc->lock); - __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty); + __release_gpc(kvm, old_pfn, old_khva, old_gpa); return ret; } @@ -277,7 +254,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) { void *old_khva; kvm_pfn_t old_pfn; - bool old_dirty; gpa_t old_gpa; write_lock_irq(&gpc->lock); @@ -285,7 +261,6 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) gpc->valid = false; old_khva = gpc->khva - offset_in_page(gpc->khva); - old_dirty = gpc->dirty; old_gpa = gpc->gpa; old_pfn = gpc->pfn; @@ -298,14 +273,14 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) write_unlock_irq(&gpc->lock); - __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty); + __release_gpc(kvm, old_pfn, old_khva, old_gpa); } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap); int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, struct kvm_vcpu *vcpu, enum pfn_cache_usage usage, - gpa_t gpa, unsigned long len, bool dirty) + gpa_t gpa, unsigned long len) { WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage); @@ -324,7 +299,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, list_add(&gpc->list, &kvm->gpc_list); spin_unlock(&kvm->gpc_lock); } - return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, dirty); + return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len); } EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init); -- cgit v1.2.3-71-gd317 From 18bfee3216fa6f28d55ebf88d824a539d2bec3c7 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 
30 Mar 2022 09:00:19 +0200 Subject: ftrace: Make ftrace_graph_is_dead() a static branch ftrace_graph_is_dead() is used on hot paths, it just reads a variable in memory and is not worth suffering function call constraints. For instance, at entry of prepare_ftrace_return(), inlining it avoids saving prepare_ftrace_return() parameters to stack and restoring them after calling ftrace_graph_is_dead(). While at it using a static branch is even more performant and is rather well adapted considering that the returned value will almost never change. Inline ftrace_graph_is_dead() and replace 'kill_ftrace_graph' bool by a static branch. The performance improvement is noticeable. Link: https://lkml.kernel.org/r/e0411a6a0ed3eafff0ad2bc9cd4b0e202b4617df.1648623570.git.christophe.leroy@csgroup.eu Signed-off-by: Christophe Leroy Signed-off-by: Steven Rostedt (Google) --- include/linux/ftrace.h | 16 +++++++++++++++- kernel/trace/fgraph.c | 17 +++-------------- 2 files changed, 18 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 37b619185ec9..f15a4b76cbfc 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -1015,7 +1016,20 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, extern int register_ftrace_graph(struct fgraph_ops *ops); extern void unregister_ftrace_graph(struct fgraph_ops *ops); -extern bool ftrace_graph_is_dead(void); +/** + * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called + * + * ftrace_graph_stop() is called when a severe error is detected in + * the function graph tracing. This function is called by the critical + * paths of function graph to keep those paths from doing any more harm. + */ +DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph); + +static inline bool ftrace_graph_is_dead(void) +{ + return static_branch_unlikely(&kill_ftrace_graph); +} + extern void ftrace_graph_stop(void); /* The current handlers in use */ diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index 19028e072cdb..8f4fb328133a 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -7,6 +7,7 @@ * * Highly modified by Steven Rostedt (VMware). */ +#include #include #include #include @@ -23,24 +24,12 @@ #define ASSIGN_OPS_HASH(opsname, val) #endif -static bool kill_ftrace_graph; +DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph); int ftrace_graph_active; /* Both enabled by default (can be cleared by function_graph tracer flags */ static bool fgraph_sleep_time = true; -/** - * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called - * - * ftrace_graph_stop() is called when a severe error is detected in - * the function graph tracing. This function is called by the critical - * paths of function graph to keep those paths from doing any more harm. 
- */ -bool ftrace_graph_is_dead(void) -{ - return kill_ftrace_graph; -} - /** * ftrace_graph_stop - set to permanently disable function graph tracing * @@ -51,7 +40,7 @@ bool ftrace_graph_is_dead(void) */ void ftrace_graph_stop(void) { - kill_ftrace_graph = true; + static_branch_enable(&kill_ftrace_graph); } /* Add a function return address to the trace stack on thread info.*/ -- cgit v1.2.3-71-gd317 From 5cfff569cab8bf544bab62c911c5d6efd5af5e05 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Fri, 1 Apr 2022 14:39:03 -0400 Subject: tracing: Move user_events.h temporarily out of include/uapi While user_events API is under development and has been marked for broken to not let the API become fixed, move the header file out of the uapi directory. This is to prevent it from being installed, then later changed, and then have an old distro user space update with a new kernel, where applications see the user_events being available, but the old header is in place, and then they get compiled incorrectly. Also, surround the include with CONFIG_COMPILE_TEST to the current location, but when the BROKEN tag is taken off, it will use the uapi directory, and fail to compile. This is a good way to remind us to move the header back. Link: https://lore.kernel.org/all/20220330155835.5e1f6669@gandalf.local.home Link: https://lkml.kernel.org/r/20220330201755.29319-1-mathieu.desnoyers@efficios.com Link: https://lkml.kernel.org/r/20220401143903.188384f3@gandalf.local.home Suggested-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) --- include/linux/user_events.h | 63 ++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/user_events.h | 63 ---------------------------------------- kernel/trace/trace_events_user.c | 5 ++++ 3 files changed, 68 insertions(+), 63 deletions(-) create mode 100644 include/linux/user_events.h delete mode 100644 include/uapi/linux/user_events.h (limited to 'include/linux') diff --git a/include/linux/user_events.h b/include/linux/user_events.h new file mode 100644 index 000000000000..736e05603463 --- /dev/null +++ b/include/linux/user_events.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2021, Microsoft Corporation. + * + * Authors: + * Beau Belgrave + */ +#ifndef _UAPI_LINUX_USER_EVENTS_H +#define _UAPI_LINUX_USER_EVENTS_H + +#include +#include + +#ifdef __KERNEL__ +#include +#else +#include +#endif + +#define USER_EVENTS_SYSTEM "user_events" +#define USER_EVENTS_PREFIX "u:" + +/* Bits 0-6 are for known probe types, Bit 7 is for unknown probes */ +#define EVENT_BIT_FTRACE 0 +#define EVENT_BIT_PERF 1 +#define EVENT_BIT_OTHER 7 + +#define EVENT_STATUS_FTRACE (1 << EVENT_BIT_FTRACE) +#define EVENT_STATUS_PERF (1 << EVENT_BIT_PERF) +#define EVENT_STATUS_OTHER (1 << EVENT_BIT_OTHER) + +/* Create dynamic location entry within a 32-bit value */ +#define DYN_LOC(offset, size) ((size) << 16 | (offset)) + +/* + * Describes an event registration and stores the results of the registration. + * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum + * must set the size and name_args before invocation. 
+ */ +struct user_reg { + + /* Input: Size of the user_reg structure being used */ + __u32 size; + + /* Input: Pointer to string with event name, description and flags */ + __u64 name_args; + + /* Output: Byte index of the event within the status page */ + __u32 status_index; + + /* Output: Index of the event to use when writing data */ + __u32 write_index; +}; + +#define DIAG_IOC_MAGIC '*' + +/* Requests to register a user_event */ +#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg*) + +/* Requests to delete a user_event */ +#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*) + +#endif /* _UAPI_LINUX_USER_EVENTS_H */ diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h deleted file mode 100644 index 736e05603463..000000000000 --- a/include/uapi/linux/user_events.h +++ /dev/null @@ -1,63 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (c) 2021, Microsoft Corporation. - * - * Authors: - * Beau Belgrave - */ -#ifndef _UAPI_LINUX_USER_EVENTS_H -#define _UAPI_LINUX_USER_EVENTS_H - -#include -#include - -#ifdef __KERNEL__ -#include -#else -#include -#endif - -#define USER_EVENTS_SYSTEM "user_events" -#define USER_EVENTS_PREFIX "u:" - -/* Bits 0-6 are for known probe types, Bit 7 is for unknown probes */ -#define EVENT_BIT_FTRACE 0 -#define EVENT_BIT_PERF 1 -#define EVENT_BIT_OTHER 7 - -#define EVENT_STATUS_FTRACE (1 << EVENT_BIT_FTRACE) -#define EVENT_STATUS_PERF (1 << EVENT_BIT_PERF) -#define EVENT_STATUS_OTHER (1 << EVENT_BIT_OTHER) - -/* Create dynamic location entry within a 32-bit value */ -#define DYN_LOC(offset, size) ((size) << 16 | (offset)) - -/* - * Describes an event registration and stores the results of the registration. - * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum - * must set the size and name_args before invocation. - */ -struct user_reg { - - /* Input: Size of the user_reg structure being used */ - __u32 size; - - /* Input: Pointer to string with event name, description and flags */ - __u64 name_args; - - /* Output: Byte index of the event within the status page */ - __u32 status_index; - - /* Output: Index of the event to use when writing data */ - __u32 write_index; -}; - -#define DIAG_IOC_MAGIC '*' - -/* Requests to register a user_event */ -#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg*) - -/* Requests to delete a user_event */ -#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*) - -#endif /* _UAPI_LINUX_USER_EVENTS_H */ diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index 846c27bc7aef..706e1686b5eb 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -18,7 +18,12 @@ #include #include #include +/* Reminder to move to uapi when everything works */ +#ifdef CONFIG_COMPILE_TEST +#include +#else #include +#endif #include "trace.h" #include "trace_dynevent.h" -- cgit v1.2.3-71-gd317 From 1cd927ad6f62f27d8908498dcbf61395c5dd5fe2 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Fri, 1 Apr 2022 14:39:03 -0400 Subject: tracing: mark user_events as BROKEN After being merged, user_events become more visible to a wider audience that have concerns with the current API. It is too late to fix this for this release, but instead of a full revert, just mark it as BROKEN (which prevents it from being selected in make config). Then we can work finding a better API. If that fails, then it will need to be completely reverted. 
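The mechanics of the BROKEN marking are in the kernel/trace/Kconfig hunk at the end of this commit; the resulting option reads:

config USER_EVENTS
	bool "User trace events"
	select TRACING
	select DYNAMIC_EVENTS
	depends on BROKEN || COMPILE_TEST

Since BROKEN has no prompt and is never enabled in a normal configuration, the option effectively remains selectable only under COMPILE_TEST.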
To not have the code silently bitrot, still allow building it with COMPILE_TEST. And to prevent the uapi header from being installed, then later changed, and then have an old distro user space see the old version, move the header file out of the uapi directory. Surround the include with CONFIG_COMPILE_TEST to the current location, but when the BROKEN tag is taken off, it will use the uapi directory, and fail to compile. This is a good way to remind us to move the header back. Link: https://lore.kernel.org/all/20220330155835.5e1f6669@gandalf.local.home Link: https://lkml.kernel.org/r/20220330201755.29319-1-mathieu.desnoyers@efficios.com Suggested-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) Signed-off-by: Linus Torvalds --- include/linux/user_events.h | 116 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/user_events.h | 116 --------------------------------------- kernel/trace/Kconfig | 1 + kernel/trace/trace_events_user.c | 5 ++ 4 files changed, 122 insertions(+), 116 deletions(-) create mode 100644 include/linux/user_events.h delete mode 100644 include/uapi/linux/user_events.h (limited to 'include/linux') diff --git a/include/linux/user_events.h b/include/linux/user_events.h new file mode 100644 index 000000000000..e570840571e1 --- /dev/null +++ b/include/linux/user_events.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2021, Microsoft Corporation. + * + * Authors: + * Beau Belgrave + */ +#ifndef _UAPI_LINUX_USER_EVENTS_H +#define _UAPI_LINUX_USER_EVENTS_H + +#include +#include + +#ifdef __KERNEL__ +#include +#else +#include +#endif + +#define USER_EVENTS_SYSTEM "user_events" +#define USER_EVENTS_PREFIX "u:" + +/* Bits 0-6 are for known probe types, Bit 7 is for unknown probes */ +#define EVENT_BIT_FTRACE 0 +#define EVENT_BIT_PERF 1 +#define EVENT_BIT_OTHER 7 + +#define EVENT_STATUS_FTRACE (1 << EVENT_BIT_FTRACE) +#define EVENT_STATUS_PERF (1 << EVENT_BIT_PERF) +#define EVENT_STATUS_OTHER (1 << EVENT_BIT_OTHER) + +/* Create dynamic location entry within a 32-bit value */ +#define DYN_LOC(offset, size) ((size) << 16 | (offset)) + +/* Use raw iterator for attached BPF program(s), no affect on ftrace/perf */ +#define FLAG_BPF_ITER (1 << 0) + +/* + * Describes an event registration and stores the results of the registration. + * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum + * must set the size and name_args before invocation. + */ +struct user_reg { + + /* Input: Size of the user_reg structure being used */ + __u32 size; + + /* Input: Pointer to string with event name, description and flags */ + __u64 name_args; + + /* Output: Byte index of the event within the status page */ + __u32 status_index; + + /* Output: Index of the event to use when writing data */ + __u32 write_index; +}; + +#define DIAG_IOC_MAGIC '*' + +/* Requests to register a user_event */ +#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg*) + +/* Requests to delete a user_event */ +#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*) + +/* Data type that was passed to the BPF program */ +enum { + /* Data resides in kernel space */ + USER_BPF_DATA_KERNEL, + + /* Data resides in user space */ + USER_BPF_DATA_USER, + + /* Data is a pointer to a user_bpf_iter structure */ + USER_BPF_DATA_ITER, +}; + +/* + * Describes an iovec iterator that BPF programs can use to access data for + * a given user_event write() / writev() call. 
+ */
+struct user_bpf_iter {
+
+	/* Offset of the data within the first iovec */
+	__u32 iov_offset;
+
+	/* Number of iovec structures */
+	__u32 nr_segs;
+
+	/* Pointer to iovec structures */
+	const struct iovec *iov;
+};
+
+/* Context that BPF programs receive when attached to a user_event */
+struct user_bpf_context {
+
+	/* Data type being passed (see union below) */
+	__u32 data_type;
+
+	/* Length of the data */
+	__u32 data_len;
+
+	/* Pointer to data, varies by data type */
+	union {
+		/* Kernel data (data_type == USER_BPF_DATA_KERNEL) */
+		void *kdata;
+
+		/* User data (data_type == USER_BPF_DATA_USER) */
+		void *udata;
+
+		/* Direct iovec (data_type == USER_BPF_DATA_ITER) */
+		struct user_bpf_iter *iter;
+	};
+};
+
+#endif /* _UAPI_LINUX_USER_EVENTS_H */
diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h
deleted file mode 100644
index e570840571e1..000000000000
--- a/include/uapi/linux/user_events.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (c) 2021, Microsoft Corporation.
- *
- * Authors:
- *   Beau Belgrave
- */
-#ifndef _UAPI_LINUX_USER_EVENTS_H
-#define _UAPI_LINUX_USER_EVENTS_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#ifdef __KERNEL__
-#include <linux/uio.h>
-#else
-#include <sys/uio.h>
-#endif
-
-#define USER_EVENTS_SYSTEM "user_events"
-#define USER_EVENTS_PREFIX "u:"
-
-/* Bits 0-6 are for known probe types, Bit 7 is for unknown probes */
-#define EVENT_BIT_FTRACE 0
-#define EVENT_BIT_PERF 1
-#define EVENT_BIT_OTHER 7
-
-#define EVENT_STATUS_FTRACE (1 << EVENT_BIT_FTRACE)
-#define EVENT_STATUS_PERF (1 << EVENT_BIT_PERF)
-#define EVENT_STATUS_OTHER (1 << EVENT_BIT_OTHER)
-
-/* Create dynamic location entry within a 32-bit value */
-#define DYN_LOC(offset, size) ((size) << 16 | (offset))
-
-/* Use raw iterator for attached BPF program(s), no affect on ftrace/perf */
-#define FLAG_BPF_ITER (1 << 0)
-
-/*
- * Describes an event registration and stores the results of the registration.
- * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum
- * must set the size and name_args before invocation.
- */
-struct user_reg {
-
-	/* Input: Size of the user_reg structure being used */
-	__u32 size;
-
-	/* Input: Pointer to string with event name, description and flags */
-	__u64 name_args;
-
-	/* Output: Byte index of the event within the status page */
-	__u32 status_index;
-
-	/* Output: Index of the event to use when writing data */
-	__u32 write_index;
-};
-
-#define DIAG_IOC_MAGIC '*'
-
-/* Requests to register a user_event */
-#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg*)
-
-/* Requests to delete a user_event */
-#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*)
-
-/* Data type that was passed to the BPF program */
-enum {
-	/* Data resides in kernel space */
-	USER_BPF_DATA_KERNEL,
-
-	/* Data resides in user space */
-	USER_BPF_DATA_USER,
-
-	/* Data is a pointer to a user_bpf_iter structure */
-	USER_BPF_DATA_ITER,
-};
-
-/*
- * Describes an iovec iterator that BPF programs can use to access data for
- * a given user_event write() / writev() call.
- */
-struct user_bpf_iter {
-
-	/* Offset of the data within the first iovec */
-	__u32 iov_offset;
-
-	/* Number of iovec structures */
-	__u32 nr_segs;
-
-	/* Pointer to iovec structures */
-	const struct iovec *iov;
-};
-
-/* Context that BPF programs receive when attached to a user_event */
-struct user_bpf_context {
-
-	/* Data type being passed (see union below) */
-	__u32 data_type;
-
-	/* Length of the data */
-	__u32 data_len;
-
-	/* Pointer to data, varies by data type */
-	union {
-		/* Kernel data (data_type == USER_BPF_DATA_KERNEL) */
-		void *kdata;
-
-		/* User data (data_type == USER_BPF_DATA_USER) */
-		void *udata;
-
-		/* Direct iovec (data_type == USER_BPF_DATA_ITER) */
-		struct user_bpf_iter *iter;
-	};
-};
-
-#endif /* _UAPI_LINUX_USER_EVENTS_H */
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9bb54c0b3b2d..2c43e327a619 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -767,6 +767,7 @@ config USER_EVENTS
 	bool "User trace events"
 	select TRACING
 	select DYNAMIC_EVENTS
+	depends on BROKEN || COMPILE_TEST # API needs to be straighten out
 	help
 	  User trace events are user-defined trace events that
 	  can be used like an existing kernel trace event.  User trace
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 8b3d241a31c2..68d62bfac12f 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -18,7 +18,12 @@
 #include <linux/tracefs.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+/* Reminder to move to uapi when everything works */
+#ifdef CONFIG_COMPILE_TEST
+#include <linux/user_events.h>
+#else
 #include <uapi/linux/user_events.h>
+#endif
 #include "trace.h"
 #include "trace_dynevent.h"
--
cgit v1.2.3-71-gd317


From 5467801f1fcbdc46bc7298a84dbf3ca1ff2a7320 Mon Sep 17 00:00:00 2001
From: Shreeya Patel
Date: Mon, 21 Mar 2022 19:02:41 +0530
Subject: gpio: Restrict usage of GPIO chip irq members before initialization

GPIO chip irq members are exposed before they could be completely
initialized, and this leads to race conditions.

One such issue was observed for the gc->irq.domain variable, which was
accessed through the I2C interface in gpiochip_to_irq() before it could
be initialized by gpiochip_add_irqchip(). This resulted in a kernel
NULL pointer dereference.

Following are the logs for reference:

kernel: Call Trace:
kernel:  gpiod_to_irq+0x53/0x70
kernel:  acpi_dev_gpio_irq_get_by+0x113/0x1f0
kernel:  i2c_acpi_get_irq+0xc0/0xd0
kernel:  i2c_device_probe+0x28a/0x2a0
kernel:  really_probe+0xf2/0x460
kernel: RIP: 0010:gpiochip_to_irq+0x47/0xc0

To avoid such scenarios, restrict usage of GPIO chip irq members before
they are completely initialized.

Signed-off-by: Shreeya Patel
Cc: stable@vger.kernel.org
Reviewed-by: Andy Shevchenko
Reviewed-by: Linus Walleij
Signed-off-by: Bartosz Golaszewski
---
 drivers/gpio/gpiolib.c      | 19 +++++++++++++++++++
 include/linux/gpio/driver.h |  9 +++++++++
 2 files changed, 28 insertions(+)

(limited to 'include/linux')

diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e59884cc12a7..085348e08986 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1404,6 +1404,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
 {
 	struct irq_domain *domain = gc->irq.domain;
 
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+	/*
+	 * Avoid race condition with other code, which tries to lookup
+	 * an IRQ before the irqchip has been properly registered,
+	 * i.e. while gpiochip is still being brought up.
+ */
+	if (!gc->irq.initialized)
+		return -EPROBE_DEFER;
+#endif
+
 	if (!gpiochip_irqchip_irq_valid(gc, offset))
 		return -ENXIO;
 
@@ -1593,6 +1603,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
 
 	acpi_gpiochip_request_interrupts(gc);
 
+	/*
+	 * Using barrier() here to prevent compiler from reordering
+	 * gc->irq.initialized before initialization of above
+	 * GPIO chip irq members.
+	 */
+	barrier();
+
+	gc->irq.initialized = true;
+
 	return 0;
 }
 
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 98c93510640e..874aabd270c9 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -221,6 +221,15 @@ struct gpio_irq_chip {
 	 */
 	bool per_parent_data;
 
+	/**
+	 * @initialized:
+	 *
+	 * Flag to track GPIO chip irq members' initialization.
+	 * This flag will make sure GPIO chip irq members are not used
+	 * before they are initialized.
+	 */
+	bool initialized;
+
 	/**
 	 * @init_hw: optional routine to initialize hardware before
 	 * an IRQ chip will be added. This is quite useful when
--
cgit v1.2.3-71-gd317


From 8fd4ddda2f49a66bf5dd3d0c01966c4b1971308b Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 14 Mar 2022 12:49:36 +0100
Subject: static_call: Don't make __static_call_return0 static

System.map shows that vmlinux contains several instances of
__static_call_return0():

  c0004fc0 t __static_call_return0
  c0011518 t __static_call_return0
  c00d8160 t __static_call_return0

arch_static_call_transform() uses the middle one to check whether we are
setting a call to __static_call_return0 or not:

  c0011520 <arch_static_call_transform>:
  c0011520:	3d 20 c0 01	lis     r9,-16383	<== r9 = 0xc001 << 16
  c0011524:	39 29 15 18	addi    r9,r9,5400	<== r9 += 0x1518
  c0011528:	7c 05 48 00	cmpw    r5,r9		<== r9 has value 0xc0011518 here

So if static_call_update() is called with one of the other instances of
__static_call_return0(), arch_static_call_transform() won't recognise it.

In order to work properly, a single global instance of
__static_call_return0() is required.
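
For illustration, a minimal sketch of why duplicated static inline copies
defeat an address check (names invented for the example; this is plain C
semantics, not code from the patch):

/* shared_header.h */
static inline long ret0(void) { return 0; }

/* a.c */
#include "shared_header.h"
long (*pa)(void) = ret0;	/* may bind to a.c's own out-of-line copy */

/* b.c */
#include "shared_header.h"
long (*pb)(void) = ret0;	/* may bind to b.c's own out-of-line copy */

/*
 * Nothing guarantees pa == pb: each translation unit may emit its own
 * ret0. A check of the form "func == __static_call_return0" therefore
 * only works when exactly one global definition of the function exists.
 */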
Fixes: 3f2a8fc4b15d ("static_call/x86: Add __static_call_return0()") Signed-off-by: Christophe Leroy Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Link: https://lkml.kernel.org/r/30821468a0e7d28251954b578e5051dc09300d04.1647258493.git.christophe.leroy@csgroup.eu --- include/linux/static_call.h | 5 +- kernel/Makefile | 3 +- kernel/static_call.c | 541 ------------------------------------------- kernel/static_call_inline.c | 543 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 546 insertions(+), 546 deletions(-) create mode 100644 kernel/static_call_inline.c (limited to 'include/linux') diff --git a/include/linux/static_call.h b/include/linux/static_call.h index 3e56a9751c06..fcc5b48989b3 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -248,10 +248,7 @@ static inline int static_call_text_reserved(void *start, void *end) return 0; } -static inline long __static_call_return0(void) -{ - return 0; -} +extern long __static_call_return0(void); #define EXPORT_STATIC_CALL(name) \ EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \ diff --git a/kernel/Makefile b/kernel/Makefile index 471d71935e90..847a82bfe0e3 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -114,7 +114,8 @@ obj-$(CONFIG_CPU_PM) += cpu_pm.o obj-$(CONFIG_BPF) += bpf/ obj-$(CONFIG_KCSAN) += kcsan/ obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o -obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call.o +obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o +obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o obj-$(CONFIG_CFI_CLANG) += cfi.o obj-$(CONFIG_PERF_EVENTS) += events/ diff --git a/kernel/static_call.c b/kernel/static_call.c index f2b8baea35d2..e9c3e69f3837 100644 --- a/kernel/static_call.c +++ b/kernel/static_call.c @@ -1,549 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -#include #include -#include -#include -#include -#include -#include -#include -#include -#include - -extern struct static_call_site __start_static_call_sites[], - __stop_static_call_sites[]; -extern struct static_call_tramp_key __start_static_call_tramp_key[], - __stop_static_call_tramp_key[]; - -static bool static_call_initialized; - -/* mutex to protect key modules/sites */ -static DEFINE_MUTEX(static_call_mutex); - -static void static_call_lock(void) -{ - mutex_lock(&static_call_mutex); -} - -static void static_call_unlock(void) -{ - mutex_unlock(&static_call_mutex); -} - -static inline void *static_call_addr(struct static_call_site *site) -{ - return (void *)((long)site->addr + (long)&site->addr); -} - -static inline unsigned long __static_call_key(const struct static_call_site *site) -{ - return (long)site->key + (long)&site->key; -} - -static inline struct static_call_key *static_call_key(const struct static_call_site *site) -{ - return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS); -} - -/* These assume the key is word-aligned. 
*/ -static inline bool static_call_is_init(struct static_call_site *site) -{ - return __static_call_key(site) & STATIC_CALL_SITE_INIT; -} - -static inline bool static_call_is_tail(struct static_call_site *site) -{ - return __static_call_key(site) & STATIC_CALL_SITE_TAIL; -} - -static inline void static_call_set_init(struct static_call_site *site) -{ - site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) - - (long)&site->key; -} - -static int static_call_site_cmp(const void *_a, const void *_b) -{ - const struct static_call_site *a = _a; - const struct static_call_site *b = _b; - const struct static_call_key *key_a = static_call_key(a); - const struct static_call_key *key_b = static_call_key(b); - - if (key_a < key_b) - return -1; - - if (key_a > key_b) - return 1; - - return 0; -} - -static void static_call_site_swap(void *_a, void *_b, int size) -{ - long delta = (unsigned long)_a - (unsigned long)_b; - struct static_call_site *a = _a; - struct static_call_site *b = _b; - struct static_call_site tmp = *a; - - a->addr = b->addr - delta; - a->key = b->key - delta; - - b->addr = tmp.addr + delta; - b->key = tmp.key + delta; -} - -static inline void static_call_sort_entries(struct static_call_site *start, - struct static_call_site *stop) -{ - sort(start, stop - start, sizeof(struct static_call_site), - static_call_site_cmp, static_call_site_swap); -} - -static inline bool static_call_key_has_mods(struct static_call_key *key) -{ - return !(key->type & 1); -} - -static inline struct static_call_mod *static_call_key_next(struct static_call_key *key) -{ - if (!static_call_key_has_mods(key)) - return NULL; - - return key->mods; -} - -static inline struct static_call_site *static_call_key_sites(struct static_call_key *key) -{ - if (static_call_key_has_mods(key)) - return NULL; - - return (struct static_call_site *)(key->type & ~1); -} - -void __static_call_update(struct static_call_key *key, void *tramp, void *func) -{ - struct static_call_site *site, *stop; - struct static_call_mod *site_mod, first; - - cpus_read_lock(); - static_call_lock(); - - if (key->func == func) - goto done; - - key->func = func; - - arch_static_call_transform(NULL, tramp, func, false); - - /* - * If uninitialized, we'll not update the callsites, but they still - * point to the trampoline and we just patched that. - */ - if (WARN_ON_ONCE(!static_call_initialized)) - goto done; - - first = (struct static_call_mod){ - .next = static_call_key_next(key), - .mod = NULL, - .sites = static_call_key_sites(key), - }; - - for (site_mod = &first; site_mod; site_mod = site_mod->next) { - bool init = system_state < SYSTEM_RUNNING; - struct module *mod = site_mod->mod; - - if (!site_mod->sites) { - /* - * This can happen if the static call key is defined in - * a module which doesn't use it. - * - * It also happens in the has_mods case, where the - * 'first' entry has no sites associated with it. - */ - continue; - } - - stop = __stop_static_call_sites; - - if (mod) { -#ifdef CONFIG_MODULES - stop = mod->static_call_sites + - mod->num_static_call_sites; - init = mod->state == MODULE_STATE_COMING; -#endif - } - - for (site = site_mod->sites; - site < stop && static_call_key(site) == key; site++) { - void *site_addr = static_call_addr(site); - - if (!init && static_call_is_init(site)) - continue; - - if (!kernel_text_address((unsigned long)site_addr)) { - /* - * This skips patching built-in __exit, which - * is part of init_section_contains() but is - * not part of kernel_text_address(). 
- * - * Skipping built-in __exit is fine since it - * will never be executed. - */ - WARN_ONCE(!static_call_is_init(site), - "can't patch static call site at %pS", - site_addr); - continue; - } - - arch_static_call_transform(site_addr, NULL, func, - static_call_is_tail(site)); - } - } - -done: - static_call_unlock(); - cpus_read_unlock(); -} -EXPORT_SYMBOL_GPL(__static_call_update); - -static int __static_call_init(struct module *mod, - struct static_call_site *start, - struct static_call_site *stop) -{ - struct static_call_site *site; - struct static_call_key *key, *prev_key = NULL; - struct static_call_mod *site_mod; - - if (start == stop) - return 0; - - static_call_sort_entries(start, stop); - - for (site = start; site < stop; site++) { - void *site_addr = static_call_addr(site); - - if ((mod && within_module_init((unsigned long)site_addr, mod)) || - (!mod && init_section_contains(site_addr, 1))) - static_call_set_init(site); - - key = static_call_key(site); - if (key != prev_key) { - prev_key = key; - - /* - * For vmlinux (!mod) avoid the allocation by storing - * the sites pointer in the key itself. Also see - * __static_call_update()'s @first. - * - * This allows architectures (eg. x86) to call - * static_call_init() before memory allocation works. - */ - if (!mod) { - key->sites = site; - key->type |= 1; - goto do_transform; - } - - site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL); - if (!site_mod) - return -ENOMEM; - - /* - * When the key has a direct sites pointer, extract - * that into an explicit struct static_call_mod, so we - * can have a list of modules. - */ - if (static_call_key_sites(key)) { - site_mod->mod = NULL; - site_mod->next = NULL; - site_mod->sites = static_call_key_sites(key); - - key->mods = site_mod; - - site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL); - if (!site_mod) - return -ENOMEM; - } - - site_mod->mod = mod; - site_mod->sites = site; - site_mod->next = static_call_key_next(key); - key->mods = site_mod; - } - -do_transform: - arch_static_call_transform(site_addr, NULL, key->func, - static_call_is_tail(site)); - } - - return 0; -} - -static int addr_conflict(struct static_call_site *site, void *start, void *end) -{ - unsigned long addr = (unsigned long)static_call_addr(site); - - if (addr <= (unsigned long)end && - addr + CALL_INSN_SIZE > (unsigned long)start) - return 1; - - return 0; -} - -static int __static_call_text_reserved(struct static_call_site *iter_start, - struct static_call_site *iter_stop, - void *start, void *end, bool init) -{ - struct static_call_site *iter = iter_start; - - while (iter < iter_stop) { - if (init || !static_call_is_init(iter)) { - if (addr_conflict(iter, start, end)) - return 1; - } - iter++; - } - - return 0; -} - -#ifdef CONFIG_MODULES - -static int __static_call_mod_text_reserved(void *start, void *end) -{ - struct module *mod; - int ret; - - preempt_disable(); - mod = __module_text_address((unsigned long)start); - WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); - if (!try_module_get(mod)) - mod = NULL; - preempt_enable(); - - if (!mod) - return 0; - - ret = __static_call_text_reserved(mod->static_call_sites, - mod->static_call_sites + mod->num_static_call_sites, - start, end, mod->state == MODULE_STATE_COMING); - - module_put(mod); - - return ret; -} - -static unsigned long tramp_key_lookup(unsigned long addr) -{ - struct static_call_tramp_key *start = __start_static_call_tramp_key; - struct static_call_tramp_key *stop = __stop_static_call_tramp_key; - struct static_call_tramp_key *tramp_key; - - for 
(tramp_key = start; tramp_key != stop; tramp_key++) { - unsigned long tramp; - - tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp; - if (tramp == addr) - return (long)tramp_key->key + (long)&tramp_key->key; - } - - return 0; -} - -static int static_call_add_module(struct module *mod) -{ - struct static_call_site *start = mod->static_call_sites; - struct static_call_site *stop = start + mod->num_static_call_sites; - struct static_call_site *site; - - for (site = start; site != stop; site++) { - unsigned long s_key = __static_call_key(site); - unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS; - unsigned long key; - - /* - * Is the key is exported, 'addr' points to the key, which - * means modules are allowed to call static_call_update() on - * it. - * - * Otherwise, the key isn't exported, and 'addr' points to the - * trampoline so we need to lookup the key. - * - * We go through this dance to prevent crazy modules from - * abusing sensitive static calls. - */ - if (!kernel_text_address(addr)) - continue; - - key = tramp_key_lookup(addr); - if (!key) { - pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n", - static_call_addr(site)); - return -EINVAL; - } - - key |= s_key & STATIC_CALL_SITE_FLAGS; - site->key = key - (long)&site->key; - } - - return __static_call_init(mod, start, stop); -} - -static void static_call_del_module(struct module *mod) -{ - struct static_call_site *start = mod->static_call_sites; - struct static_call_site *stop = mod->static_call_sites + - mod->num_static_call_sites; - struct static_call_key *key, *prev_key = NULL; - struct static_call_mod *site_mod, **prev; - struct static_call_site *site; - - for (site = start; site < stop; site++) { - key = static_call_key(site); - if (key == prev_key) - continue; - - prev_key = key; - - for (prev = &key->mods, site_mod = key->mods; - site_mod && site_mod->mod != mod; - prev = &site_mod->next, site_mod = site_mod->next) - ; - - if (!site_mod) - continue; - - *prev = site_mod->next; - kfree(site_mod); - } -} - -static int static_call_module_notify(struct notifier_block *nb, - unsigned long val, void *data) -{ - struct module *mod = data; - int ret = 0; - - cpus_read_lock(); - static_call_lock(); - - switch (val) { - case MODULE_STATE_COMING: - ret = static_call_add_module(mod); - if (ret) { - WARN(1, "Failed to allocate memory for static calls"); - static_call_del_module(mod); - } - break; - case MODULE_STATE_GOING: - static_call_del_module(mod); - break; - } - - static_call_unlock(); - cpus_read_unlock(); - - return notifier_from_errno(ret); -} - -static struct notifier_block static_call_module_nb = { - .notifier_call = static_call_module_notify, -}; - -#else - -static inline int __static_call_mod_text_reserved(void *start, void *end) -{ - return 0; -} - -#endif /* CONFIG_MODULES */ - -int static_call_text_reserved(void *start, void *end) -{ - bool init = system_state < SYSTEM_RUNNING; - int ret = __static_call_text_reserved(__start_static_call_sites, - __stop_static_call_sites, start, end, init); - - if (ret) - return ret; - - return __static_call_mod_text_reserved(start, end); -} - -int __init static_call_init(void) -{ - int ret; - - if (static_call_initialized) - return 0; - - cpus_read_lock(); - static_call_lock(); - ret = __static_call_init(NULL, __start_static_call_sites, - __stop_static_call_sites); - static_call_unlock(); - cpus_read_unlock(); - - if (ret) { - pr_err("Failed to allocate memory for static_call!\n"); - BUG(); - } - - static_call_initialized = true; - -#ifdef CONFIG_MODULES - 
register_module_notifier(&static_call_module_nb); -#endif - return 0; -} -early_initcall(static_call_init); long __static_call_return0(void) { return 0; } EXPORT_SYMBOL_GPL(__static_call_return0); - -#ifdef CONFIG_STATIC_CALL_SELFTEST - -static int func_a(int x) -{ - return x+1; -} - -static int func_b(int x) -{ - return x+2; -} - -DEFINE_STATIC_CALL(sc_selftest, func_a); - -static struct static_call_data { - int (*func)(int); - int val; - int expect; -} static_call_data [] __initdata = { - { NULL, 2, 3 }, - { func_b, 2, 4 }, - { func_a, 2, 3 } -}; - -static int __init test_static_call_init(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) { - struct static_call_data *scd = &static_call_data[i]; - - if (scd->func) - static_call_update(sc_selftest, scd->func); - - WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect); - } - - return 0; -} -early_initcall(test_static_call_init); - -#endif /* CONFIG_STATIC_CALL_SELFTEST */ diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c new file mode 100644 index 000000000000..dc5665b62814 --- /dev/null +++ b/kernel/static_call_inline.c @@ -0,0 +1,543 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern struct static_call_site __start_static_call_sites[], + __stop_static_call_sites[]; +extern struct static_call_tramp_key __start_static_call_tramp_key[], + __stop_static_call_tramp_key[]; + +static bool static_call_initialized; + +/* mutex to protect key modules/sites */ +static DEFINE_MUTEX(static_call_mutex); + +static void static_call_lock(void) +{ + mutex_lock(&static_call_mutex); +} + +static void static_call_unlock(void) +{ + mutex_unlock(&static_call_mutex); +} + +static inline void *static_call_addr(struct static_call_site *site) +{ + return (void *)((long)site->addr + (long)&site->addr); +} + +static inline unsigned long __static_call_key(const struct static_call_site *site) +{ + return (long)site->key + (long)&site->key; +} + +static inline struct static_call_key *static_call_key(const struct static_call_site *site) +{ + return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS); +} + +/* These assume the key is word-aligned. 
*/ +static inline bool static_call_is_init(struct static_call_site *site) +{ + return __static_call_key(site) & STATIC_CALL_SITE_INIT; +} + +static inline bool static_call_is_tail(struct static_call_site *site) +{ + return __static_call_key(site) & STATIC_CALL_SITE_TAIL; +} + +static inline void static_call_set_init(struct static_call_site *site) +{ + site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) - + (long)&site->key; +} + +static int static_call_site_cmp(const void *_a, const void *_b) +{ + const struct static_call_site *a = _a; + const struct static_call_site *b = _b; + const struct static_call_key *key_a = static_call_key(a); + const struct static_call_key *key_b = static_call_key(b); + + if (key_a < key_b) + return -1; + + if (key_a > key_b) + return 1; + + return 0; +} + +static void static_call_site_swap(void *_a, void *_b, int size) +{ + long delta = (unsigned long)_a - (unsigned long)_b; + struct static_call_site *a = _a; + struct static_call_site *b = _b; + struct static_call_site tmp = *a; + + a->addr = b->addr - delta; + a->key = b->key - delta; + + b->addr = tmp.addr + delta; + b->key = tmp.key + delta; +} + +static inline void static_call_sort_entries(struct static_call_site *start, + struct static_call_site *stop) +{ + sort(start, stop - start, sizeof(struct static_call_site), + static_call_site_cmp, static_call_site_swap); +} + +static inline bool static_call_key_has_mods(struct static_call_key *key) +{ + return !(key->type & 1); +} + +static inline struct static_call_mod *static_call_key_next(struct static_call_key *key) +{ + if (!static_call_key_has_mods(key)) + return NULL; + + return key->mods; +} + +static inline struct static_call_site *static_call_key_sites(struct static_call_key *key) +{ + if (static_call_key_has_mods(key)) + return NULL; + + return (struct static_call_site *)(key->type & ~1); +} + +void __static_call_update(struct static_call_key *key, void *tramp, void *func) +{ + struct static_call_site *site, *stop; + struct static_call_mod *site_mod, first; + + cpus_read_lock(); + static_call_lock(); + + if (key->func == func) + goto done; + + key->func = func; + + arch_static_call_transform(NULL, tramp, func, false); + + /* + * If uninitialized, we'll not update the callsites, but they still + * point to the trampoline and we just patched that. + */ + if (WARN_ON_ONCE(!static_call_initialized)) + goto done; + + first = (struct static_call_mod){ + .next = static_call_key_next(key), + .mod = NULL, + .sites = static_call_key_sites(key), + }; + + for (site_mod = &first; site_mod; site_mod = site_mod->next) { + bool init = system_state < SYSTEM_RUNNING; + struct module *mod = site_mod->mod; + + if (!site_mod->sites) { + /* + * This can happen if the static call key is defined in + * a module which doesn't use it. + * + * It also happens in the has_mods case, where the + * 'first' entry has no sites associated with it. + */ + continue; + } + + stop = __stop_static_call_sites; + + if (mod) { +#ifdef CONFIG_MODULES + stop = mod->static_call_sites + + mod->num_static_call_sites; + init = mod->state == MODULE_STATE_COMING; +#endif + } + + for (site = site_mod->sites; + site < stop && static_call_key(site) == key; site++) { + void *site_addr = static_call_addr(site); + + if (!init && static_call_is_init(site)) + continue; + + if (!kernel_text_address((unsigned long)site_addr)) { + /* + * This skips patching built-in __exit, which + * is part of init_section_contains() but is + * not part of kernel_text_address(). 
+ * + * Skipping built-in __exit is fine since it + * will never be executed. + */ + WARN_ONCE(!static_call_is_init(site), + "can't patch static call site at %pS", + site_addr); + continue; + } + + arch_static_call_transform(site_addr, NULL, func, + static_call_is_tail(site)); + } + } + +done: + static_call_unlock(); + cpus_read_unlock(); +} +EXPORT_SYMBOL_GPL(__static_call_update); + +static int __static_call_init(struct module *mod, + struct static_call_site *start, + struct static_call_site *stop) +{ + struct static_call_site *site; + struct static_call_key *key, *prev_key = NULL; + struct static_call_mod *site_mod; + + if (start == stop) + return 0; + + static_call_sort_entries(start, stop); + + for (site = start; site < stop; site++) { + void *site_addr = static_call_addr(site); + + if ((mod && within_module_init((unsigned long)site_addr, mod)) || + (!mod && init_section_contains(site_addr, 1))) + static_call_set_init(site); + + key = static_call_key(site); + if (key != prev_key) { + prev_key = key; + + /* + * For vmlinux (!mod) avoid the allocation by storing + * the sites pointer in the key itself. Also see + * __static_call_update()'s @first. + * + * This allows architectures (eg. x86) to call + * static_call_init() before memory allocation works. + */ + if (!mod) { + key->sites = site; + key->type |= 1; + goto do_transform; + } + + site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL); + if (!site_mod) + return -ENOMEM; + + /* + * When the key has a direct sites pointer, extract + * that into an explicit struct static_call_mod, so we + * can have a list of modules. + */ + if (static_call_key_sites(key)) { + site_mod->mod = NULL; + site_mod->next = NULL; + site_mod->sites = static_call_key_sites(key); + + key->mods = site_mod; + + site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL); + if (!site_mod) + return -ENOMEM; + } + + site_mod->mod = mod; + site_mod->sites = site; + site_mod->next = static_call_key_next(key); + key->mods = site_mod; + } + +do_transform: + arch_static_call_transform(site_addr, NULL, key->func, + static_call_is_tail(site)); + } + + return 0; +} + +static int addr_conflict(struct static_call_site *site, void *start, void *end) +{ + unsigned long addr = (unsigned long)static_call_addr(site); + + if (addr <= (unsigned long)end && + addr + CALL_INSN_SIZE > (unsigned long)start) + return 1; + + return 0; +} + +static int __static_call_text_reserved(struct static_call_site *iter_start, + struct static_call_site *iter_stop, + void *start, void *end, bool init) +{ + struct static_call_site *iter = iter_start; + + while (iter < iter_stop) { + if (init || !static_call_is_init(iter)) { + if (addr_conflict(iter, start, end)) + return 1; + } + iter++; + } + + return 0; +} + +#ifdef CONFIG_MODULES + +static int __static_call_mod_text_reserved(void *start, void *end) +{ + struct module *mod; + int ret; + + preempt_disable(); + mod = __module_text_address((unsigned long)start); + WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); + if (!try_module_get(mod)) + mod = NULL; + preempt_enable(); + + if (!mod) + return 0; + + ret = __static_call_text_reserved(mod->static_call_sites, + mod->static_call_sites + mod->num_static_call_sites, + start, end, mod->state == MODULE_STATE_COMING); + + module_put(mod); + + return ret; +} + +static unsigned long tramp_key_lookup(unsigned long addr) +{ + struct static_call_tramp_key *start = __start_static_call_tramp_key; + struct static_call_tramp_key *stop = __stop_static_call_tramp_key; + struct static_call_tramp_key *tramp_key; + + for 
(tramp_key = start; tramp_key != stop; tramp_key++) { + unsigned long tramp; + + tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp; + if (tramp == addr) + return (long)tramp_key->key + (long)&tramp_key->key; + } + + return 0; +} + +static int static_call_add_module(struct module *mod) +{ + struct static_call_site *start = mod->static_call_sites; + struct static_call_site *stop = start + mod->num_static_call_sites; + struct static_call_site *site; + + for (site = start; site != stop; site++) { + unsigned long s_key = __static_call_key(site); + unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS; + unsigned long key; + + /* + * Is the key is exported, 'addr' points to the key, which + * means modules are allowed to call static_call_update() on + * it. + * + * Otherwise, the key isn't exported, and 'addr' points to the + * trampoline so we need to lookup the key. + * + * We go through this dance to prevent crazy modules from + * abusing sensitive static calls. + */ + if (!kernel_text_address(addr)) + continue; + + key = tramp_key_lookup(addr); + if (!key) { + pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n", + static_call_addr(site)); + return -EINVAL; + } + + key |= s_key & STATIC_CALL_SITE_FLAGS; + site->key = key - (long)&site->key; + } + + return __static_call_init(mod, start, stop); +} + +static void static_call_del_module(struct module *mod) +{ + struct static_call_site *start = mod->static_call_sites; + struct static_call_site *stop = mod->static_call_sites + + mod->num_static_call_sites; + struct static_call_key *key, *prev_key = NULL; + struct static_call_mod *site_mod, **prev; + struct static_call_site *site; + + for (site = start; site < stop; site++) { + key = static_call_key(site); + if (key == prev_key) + continue; + + prev_key = key; + + for (prev = &key->mods, site_mod = key->mods; + site_mod && site_mod->mod != mod; + prev = &site_mod->next, site_mod = site_mod->next) + ; + + if (!site_mod) + continue; + + *prev = site_mod->next; + kfree(site_mod); + } +} + +static int static_call_module_notify(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct module *mod = data; + int ret = 0; + + cpus_read_lock(); + static_call_lock(); + + switch (val) { + case MODULE_STATE_COMING: + ret = static_call_add_module(mod); + if (ret) { + WARN(1, "Failed to allocate memory for static calls"); + static_call_del_module(mod); + } + break; + case MODULE_STATE_GOING: + static_call_del_module(mod); + break; + } + + static_call_unlock(); + cpus_read_unlock(); + + return notifier_from_errno(ret); +} + +static struct notifier_block static_call_module_nb = { + .notifier_call = static_call_module_notify, +}; + +#else + +static inline int __static_call_mod_text_reserved(void *start, void *end) +{ + return 0; +} + +#endif /* CONFIG_MODULES */ + +int static_call_text_reserved(void *start, void *end) +{ + bool init = system_state < SYSTEM_RUNNING; + int ret = __static_call_text_reserved(__start_static_call_sites, + __stop_static_call_sites, start, end, init); + + if (ret) + return ret; + + return __static_call_mod_text_reserved(start, end); +} + +int __init static_call_init(void) +{ + int ret; + + if (static_call_initialized) + return 0; + + cpus_read_lock(); + static_call_lock(); + ret = __static_call_init(NULL, __start_static_call_sites, + __stop_static_call_sites); + static_call_unlock(); + cpus_read_unlock(); + + if (ret) { + pr_err("Failed to allocate memory for static_call!\n"); + BUG(); + } + + static_call_initialized = true; + +#ifdef CONFIG_MODULES + 
register_module_notifier(&static_call_module_nb); +#endif + return 0; +} +early_initcall(static_call_init); + +#ifdef CONFIG_STATIC_CALL_SELFTEST + +static int func_a(int x) +{ + return x+1; +} + +static int func_b(int x) +{ + return x+2; +} + +DEFINE_STATIC_CALL(sc_selftest, func_a); + +static struct static_call_data { + int (*func)(int); + int val; + int expect; +} static_call_data [] __initdata = { + { NULL, 2, 3 }, + { func_b, 2, 4 }, + { func_a, 2, 3 } +}; + +static int __init test_static_call_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) { + struct static_call_data *scd = &static_call_data[i]; + + if (scd->func) + static_call_update(sc_selftest, scd->func); + + WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect); + } + + return 0; +} +early_initcall(test_static_call_init); + +#endif /* CONFIG_STATIC_CALL_SELFTEST */ -- cgit v1.2.3-71-gd317 From 5517d500829c683a358a8de04ecb2e28af629ae5 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 14 Mar 2022 11:27:35 +0100 Subject: static_call: Properly initialise DEFINE_STATIC_CALL_RET0() When a static call is updated with __static_call_return0() as target, arch_static_call_transform() set it to use an optimised set of instructions which are meant to lay in the same cacheline. But when initialising a static call with DEFINE_STATIC_CALL_RET0(), we get a branch to the real __static_call_return0() function instead of getting the optimised setup: c00d8120 <__SCT__perf_snapshot_branch_stack>: c00d8120: 4b ff ff f4 b c00d8114 <__static_call_return0> c00d8124: 3d 80 c0 0e lis r12,-16370 c00d8128: 81 8c 81 3c lwz r12,-32452(r12) c00d812c: 7d 89 03 a6 mtctr r12 c00d8130: 4e 80 04 20 bctr c00d8134: 38 60 00 00 li r3,0 c00d8138: 4e 80 00 20 blr c00d813c: 00 00 00 00 .long 0x0 Add ARCH_DEFINE_STATIC_CALL_RET0_TRAMP() defined by each architecture to setup the optimised configuration, and rework DEFINE_STATIC_CALL_RET0() to call it: c00d8120 <__SCT__perf_snapshot_branch_stack>: c00d8120: 48 00 00 14 b c00d8134 <__SCT__perf_snapshot_branch_stack+0x14> c00d8124: 3d 80 c0 0e lis r12,-16370 c00d8128: 81 8c 81 3c lwz r12,-32452(r12) c00d812c: 7d 89 03 a6 mtctr r12 c00d8130: 4e 80 04 20 bctr c00d8134: 38 60 00 00 li r3,0 c00d8138: 4e 80 00 20 blr c00d813c: 00 00 00 00 .long 0x0 Signed-off-by: Christophe Leroy Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/1e0a61a88f52a460f62a58ffc2a5f847d1f7d9d8.1647253456.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/static_call.h | 1 + arch/x86/include/asm/static_call.h | 2 ++ include/linux/static_call.h | 20 +++++++++++++++++--- 3 files changed, 20 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h index 0a0bc79bd1fa..de1018cc522b 100644 --- a/arch/powerpc/include/asm/static_call.h +++ b/arch/powerpc/include/asm/static_call.h @@ -24,5 +24,6 @@ #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) __PPC_SCT(name, "b " #func) #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) __PPC_SCT(name, "blr") +#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) __PPC_SCT(name, "b .+20") #endif /* _ASM_POWERPC_STATIC_CALL_H */ diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h index ed4f8bb6c2d9..2455d721503e 100644 --- a/arch/x86/include/asm/static_call.h +++ b/arch/x86/include/asm/static_call.h @@ -38,6 +38,8 @@ #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; 
int3; nop; nop; nop") +#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) \ + ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0) #define ARCH_ADD_TRAMP_KEY(name) \ asm(".pushsection .static_call_tramp_key, \"a\" \n" \ diff --git a/include/linux/static_call.h b/include/linux/static_call.h index fcc5b48989b3..3c50b0fdda16 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -196,6 +196,14 @@ extern long __static_call_return0(void); }; \ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) +#define DEFINE_STATIC_CALL_RET0(name, _func) \ + DECLARE_STATIC_CALL(name, _func); \ + struct static_call_key STATIC_CALL_KEY(name) = { \ + .func = __static_call_return0, \ + .type = 1, \ + }; \ + ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) + #define static_call_cond(name) (void)__static_call(name) #define EXPORT_STATIC_CALL(name) \ @@ -231,6 +239,12 @@ static inline int static_call_init(void) { return 0; } }; \ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) +#define DEFINE_STATIC_CALL_RET0(name, _func) \ + DECLARE_STATIC_CALL(name, _func); \ + struct static_call_key STATIC_CALL_KEY(name) = { \ + .func = __static_call_return0, \ + }; \ + ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) #define static_call_cond(name) (void)__static_call(name) @@ -284,6 +298,9 @@ static inline long __static_call_return0(void) .func = NULL, \ } +#define DEFINE_STATIC_CALL_RET0(name, _func) \ + __DEFINE_STATIC_CALL(name, _func, __static_call_return0) + static inline void __static_call_nop(void) { } /* @@ -327,7 +344,4 @@ static inline int static_call_text_reserved(void *start, void *end) #define DEFINE_STATIC_CALL(name, _func) \ __DEFINE_STATIC_CALL(name, _func, _func) -#define DEFINE_STATIC_CALL_RET0(name, _func) \ - __DEFINE_STATIC_CALL(name, _func, __static_call_return0) - #endif /* _LINUX_STATIC_CALL_H */ -- cgit v1.2.3-71-gd317 From df21c0d7a94db64a4e1a0d070e26fb02e60fefab Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 14 Mar 2022 11:27:36 +0100 Subject: static_call: Remove __DEFINE_STATIC_CALL macro Only DEFINE_STATIC_CALL use __DEFINE_STATIC_CALL macro now when CONFIG_HAVE_STATIC_CALL is selected. Only keep __DEFINE_STATIC_CALL() for the generic fallback, and also use it to implement DEFINE_STATIC_CALL_NULL() in that case. 
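
As a usage sketch (identifiers invented, not taken from the patch), the
three DEFINE_STATIC_CALL flavours shown in the diff above differ only in
the initial target a caller sees:

static int default_filter(int v) { return v; }
static void default_notify(int v) { }

DEFINE_STATIC_CALL(filter, default_filter);	/* starts at default_filter */
DEFINE_STATIC_CALL_NULL(notify, default_notify);/* typed like default_notify, starts NULL */
DEFINE_STATIC_CALL_RET0(stats, default_filter);	/* starts at __static_call_return0 */

static int run(int v)
{
	static_call_cond(notify)(v);	/* safe no-op while the target is NULL */
	v += static_call(stats)(v);	/* returns 0 until a real target is set */
	return static_call(filter)(v);	/* direct patched call where supported */
}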
Signed-off-by: Christophe Leroy Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/329074f92d96e3220ebe15da7bbe2779beee31eb.1647253456.git.christophe.leroy@csgroup.eu --- include/linux/static_call.h | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/static_call.h b/include/linux/static_call.h index 3c50b0fdda16..df53bed9d71f 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -180,13 +180,13 @@ extern int static_call_text_reserved(void *start, void *end); extern long __static_call_return0(void); -#define __DEFINE_STATIC_CALL(name, _func, _func_init) \ +#define DEFINE_STATIC_CALL(name, _func) \ DECLARE_STATIC_CALL(name, _func); \ struct static_call_key STATIC_CALL_KEY(name) = { \ - .func = _func_init, \ + .func = _func, \ .type = 1, \ }; \ - ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init) + ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func) #define DEFINE_STATIC_CALL_NULL(name, _func) \ DECLARE_STATIC_CALL(name, _func); \ @@ -225,12 +225,12 @@ extern long __static_call_return0(void); static inline int static_call_init(void) { return 0; } -#define __DEFINE_STATIC_CALL(name, _func, _func_init) \ +#define DEFINE_STATIC_CALL(name, _func) \ DECLARE_STATIC_CALL(name, _func); \ struct static_call_key STATIC_CALL_KEY(name) = { \ - .func = _func_init, \ + .func = _func, \ }; \ - ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init) + ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func) #define DEFINE_STATIC_CALL_NULL(name, _func) \ DECLARE_STATIC_CALL(name, _func); \ @@ -292,11 +292,11 @@ static inline long __static_call_return0(void) .func = _func_init, \ } +#define DEFINE_STATIC_CALL(name, _func) \ + __DEFINE_STATIC_CALL(name, _func, _func) + #define DEFINE_STATIC_CALL_NULL(name, _func) \ - DECLARE_STATIC_CALL(name, _func); \ - struct static_call_key STATIC_CALL_KEY(name) = { \ - .func = NULL, \ - } + __DEFINE_STATIC_CALL(name, _func, NULL) #define DEFINE_STATIC_CALL_RET0(name, _func) \ __DEFINE_STATIC_CALL(name, _func, __static_call_return0) @@ -341,7 +341,4 @@ static inline int static_call_text_reserved(void *start, void *end) #endif /* CONFIG_HAVE_STATIC_CALL */ -#define DEFINE_STATIC_CALL(name, _func) \ - __DEFINE_STATIC_CALL(name, _func, _func) - #endif /* _LINUX_STATIC_CALL_H */ -- cgit v1.2.3-71-gd317 From 2d2f8f083ef29e9b7adfe5cb421368331543473f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 28 Mar 2022 16:58:09 +0200 Subject: Revert "locking/local_lock: Make the empty local_lock_*() function a macro." With volatile removed from arch_raw_cpu_ptr() the compiler no longer creates the per-CPU reference. The usage of the macro can be reverted now. 
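
For context, a simplified sketch of the distinction being reverted
(assumed shapes, not the real kernel definitions): a function-style macro
never evaluates its argument, while a call to even an empty static inline
function does, which mattered while arch_raw_cpu_ptr() was volatile:

extern int *get_ptr(void);	/* imagine this computes a per-CPU address */

static inline void inline_hook(int *p) { }	/* argument is evaluated */
/* argument is only type-checked, never evaluated: */
#define macro_hook(p) do { (void)sizeof(*(p)); } while (0)

void caller(void)
{
	inline_hook(get_ptr());	/* get_ptr() must still run */
	macro_hook(get_ptr());	/* get_ptr() is never called */
}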
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20220328145810.86783-3-bigeasy@linutronix.de --- include/linux/local_lock_internal.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index 6d635e8306d6..975e33b793a7 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -44,9 +44,9 @@ static inline void local_lock_debug_init(local_lock_t *l) } #else /* CONFIG_DEBUG_LOCK_ALLOC */ # define LOCAL_LOCK_DEBUG_INIT(lockname) -# define local_lock_acquire(__ll) do { typecheck(local_lock_t *, __ll); } while (0) -# define local_lock_release(__ll) do { typecheck(local_lock_t *, __ll); } while (0) -# define local_lock_debug_init(__ll) do { typecheck(local_lock_t *, __ll); } while (0) +static inline void local_lock_acquire(local_lock_t *l) { } +static inline void local_lock_release(local_lock_t *l) { } +static inline void local_lock_debug_init(local_lock_t *l) { } #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ #define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) } -- cgit v1.2.3-71-gd317 From cdb4f26a63c391317e335e6e683a614358e70aeb Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 6 Jan 2022 14:31:51 +0100 Subject: kobject: kobj_type: remove default_attrs Now that all in-kernel users of default_attrs for the kobj_type are gone and converted to properly use the default_groups pointer instead, it can be safely removed. There is one standard way to create sysfs files in a kobj_type, and not two like before, causing confusion as to which should be used. Cc: "Rafael J. Wysocki" Link: https://lore.kernel.org/r/20220106133151.607703-1-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/file.c | 13 ------------- include/linux/kobject.h | 1 - lib/kobject.c | 32 -------------------------------- 3 files changed, 46 deletions(-) (limited to 'include/linux') diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 42dcf96881b6..a12ac0356c69 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -703,19 +703,6 @@ int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid) ktype = get_ktype(kobj); if (ktype) { - struct attribute **kattr; - - /* - * Change owner of the default attributes associated with the - * ktype of @kobj. - */ - for (kattr = ktype->default_attrs; kattr && *kattr; kattr++) { - error = sysfs_file_change_owner(kobj, (*kattr)->name, - kuid, kgid); - if (error) - return error; - } - /* * Change owner of the default groups associated with the * ktype of @kobj. 
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index c7b47399b36a..57fb972fea05 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -120,7 +120,6 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); struct kobj_type { void (*release)(struct kobject *kobj); const struct sysfs_ops *sysfs_ops; - struct attribute **default_attrs; /* use default_groups instead */ const struct attribute_group **default_groups; const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); const void *(*namespace)(struct kobject *kobj); diff --git a/lib/kobject.c b/lib/kobject.c index 56fa037501b5..5f0e71ab292c 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -54,32 +54,6 @@ void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) kobj->ktype->get_ownership(kobj, uid, gid); } -/* - * populate_dir - populate directory with attributes. - * @kobj: object we're working on. - * - * Most subsystems have a set of default attributes that are associated - * with an object that registers with them. This is a helper called during - * object registration that loops through the default attributes of the - * subsystem and creates attributes files for them in sysfs. - */ -static int populate_dir(struct kobject *kobj) -{ - const struct kobj_type *t = get_ktype(kobj); - struct attribute *attr; - int error = 0; - int i; - - if (t && t->default_attrs) { - for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) { - error = sysfs_create_file(kobj, attr); - if (error) - break; - } - } - return error; -} - static int create_dir(struct kobject *kobj) { const struct kobj_type *ktype = get_ktype(kobj); @@ -90,12 +64,6 @@ static int create_dir(struct kobject *kobj) if (error) return error; - error = populate_dir(kobj); - if (error) { - sysfs_remove_dir(kobj); - return error; - } - if (ktype) { error = sysfs_create_groups(kobj, ktype->default_groups); if (error) { -- cgit v1.2.3-71-gd317 From f584b68005ac782097d63a691740cb0dfed072ed Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 4 Apr 2022 15:11:04 -0400 Subject: mm: Add vma_alloc_folio() This wrapper around alloc_pages_vma() calls prep_transhuge_page(), removing the obligation from the caller. This is in the same spirit as __folio_alloc(). 
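
A hypothetical caller (names invented) showing what the new wrapper does
for free:

/* Sketch only: a fault path that wants a THP-sized folio. */
static struct folio *alloc_thp_folio_sketch(struct vm_area_struct *vma,
					    unsigned long addr)
{
	/*
	 * Before: alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, ...)
	 * followed by a manual prep_transhuge_page() on success.
	 * After: one call; prep is handled whenever order > 1.
	 */
	return vma_alloc_folio(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, addr, true);
}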
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Reviewed-by: William Kucharski --- include/linux/gfp.h | 8 ++++++-- mm/mempolicy.c | 13 +++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 761f8f1885c7..3e3d36fc2109 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -613,9 +613,11 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, #ifdef CONFIG_NUMA struct page *alloc_pages(gfp_t gfp, unsigned int order); struct folio *folio_alloc(gfp_t gfp, unsigned order); -extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, +struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage); +struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, + unsigned long addr, bool hugepage); #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages_vma(gfp_mask, order, vma, addr, true) #else @@ -627,8 +629,10 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order) { return __folio_alloc_node(gfp, order, numa_node_id()); } -#define alloc_pages_vma(gfp_mask, order, vma, addr, false)\ +#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \ alloc_pages(gfp_mask, order) +#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \ + folio_alloc(gfp, order) #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages(gfp_mask, order) #endif diff --git a/mm/mempolicy.c b/mm/mempolicy.c index a2516d31db6c..ec15f4f4b714 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2227,6 +2227,19 @@ out: } EXPORT_SYMBOL(alloc_pages_vma); +struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, + unsigned long addr, bool hugepage) +{ + struct folio *folio; + + folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr, + hugepage); + if (folio && order > 1) + prep_transhuge_page(&folio->page); + + return folio; +} + /** * alloc_pages - Allocate pages. * @gfp: GFP flags. -- cgit v1.2.3-71-gd317 From 88dee0cc93adcd83db9d089c1163dc88edafd1c1 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 6 Apr 2022 22:34:35 -0400 Subject: NFS: Ensure rpc_run_task() cannot fail in nfs_async_rename() Ensure the call to rpc_run_task() cannot fail by preallocating the rpc_task. 
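
The general shape of the fix, as a self-contained sketch (types invented;
the real structures are in the diff below): embedding the task in the
per-operation data leaves a single allocation as the only failure point.

#include <stdlib.h>

struct task_sketch { int state; };

struct rename_data_sketch {
	struct task_sketch task;	/* embedded: no second allocation */
	int other_state;
};

static struct task_sketch *start_op(void)
{
	struct rename_data_sketch *data = calloc(1, sizeof(*data));

	if (!data)
		return NULL;		/* the only point that can fail */
	return &data->task;		/* the "run" step needs no allocation */
}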
Fixes: 910ad38697d9 ("NFS: Fix memory allocation in rpc_alloc_task()") Signed-off-by: Trond Myklebust --- fs/nfs/unlink.c | 1 + include/linux/nfs_xdr.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/linux') diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index 5fa11e1aca4c..6f325e10056c 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -347,6 +347,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir, data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return ERR_PTR(-ENOMEM); + task_setup_data.task = &data->task; task_setup_data.callback_data = data; data->cred = get_current_cred(); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 49ba486aea5f..2863e5a69c6a 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1694,6 +1694,7 @@ struct nfs_unlinkdata { struct nfs_renamedata { struct nfs_renameargs args; struct nfs_renameres res; + struct rpc_task task; const struct cred *cred; struct inode *old_dir; struct dentry *old_dentry; -- cgit v1.2.3-71-gd317 From b71597edfaade119157ded98991bac7160be80c2 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 8 Apr 2022 10:00:42 +0200 Subject: mmc: core: improve API to make clear mmc_hw_reset is for cards To make it unambiguous that mmc_hw_reset() is for cards and not for controllers, we make the function argument mmc_card instead of mmc_host. Also, all users are converted. Suggested-by: Ulf Hansson Signed-off-by: Wolfram Sang Acked-by: Kalle Valo Link: https://lore.kernel.org/r/20220408080045.6497-2-wsa+renesas@sang-engineering.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/block.c | 2 +- drivers/mmc/core/core.c | 5 +++-- drivers/mmc/core/mmc_test.c | 3 +-- drivers/net/wireless/ath/ath10k/sdio.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2 +- drivers/net/wireless/marvell/mwifiex/sdio.c | 2 +- drivers/net/wireless/ti/wlcore/sdio.c | 2 +- include/linux/mmc/core.h | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index db99882c95d8..506dc900f5c7 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -993,7 +993,7 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, return -EEXIST; md->reset_done |= type; - err = mmc_hw_reset(host); + err = mmc_hw_reset(host->card); /* Ensure we switch back to the correct partition */ if (err) { struct mmc_blk_data *main_md = diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 368f10405e13..c6ae16d40766 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1995,7 +1995,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host) /** * mmc_hw_reset - reset the card in hardware - * @host: MMC host to which the card is attached + * @card: card to be reset * * Hard reset the card. This function is only for upper layers, like the * block layer or card drivers. 
You cannot use it in host drivers (struct @@ -2003,8 +2003,9 @@ static void mmc_hw_reset_for_init(struct mmc_host *host) * * Return: 0 on success, -errno on failure */ -int mmc_hw_reset(struct mmc_host *host) +int mmc_hw_reset(struct mmc_card *card) { + struct mmc_host *host = card->host; int ret; ret = host->bus_ops->hw_reset(host); diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index e6a2fd2c6d5c..8d9bceeff986 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -2325,10 +2325,9 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) static int mmc_test_reset(struct mmc_test_card *test) { struct mmc_card *card = test->card; - struct mmc_host *host = card->host; int err; - err = mmc_hw_reset(host); + err = mmc_hw_reset(card); if (!err) { /* * Reset will re-enable the card's command queue, but tests diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 63e1c2d783c5..73693c66cef1 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1633,7 +1633,7 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar) return; } - ret = mmc_hw_reset(ar_sdio->func->card->host); + ret = mmc_hw_reset(ar_sdio->func->card); if (ret) ath10k_warn(ar, "unable to reset sdio: %d\n", ret); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index ba3c159111d3..55285cad527f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4165,7 +4165,7 @@ static int brcmf_sdio_bus_reset(struct device *dev) /* reset the adapter */ sdio_claim_host(sdiodev->func1); - mmc_hw_reset(sdiodev->func1->card->host); + mmc_hw_reset(sdiodev->func1->card); sdio_release_host(sdiodev->func1); brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index bde9e4bbfffe..4f3238d2a171 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -2639,7 +2639,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) /* Run a HW reset of the SDIO interface. */ sdio_claim_host(func); - ret = mmc_hw_reset(func->card->host); + ret = mmc_hw_reset(func->card); sdio_release_host(func); switch (ret) { diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 72fc41ac83c0..9140b0163474 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -146,7 +146,7 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) * To guarantee that the SDIO card is power cycled, as required to make * the FW programming to succeed, let's do a brute force HW reset. 
*/ - mmc_hw_reset(card->host); + mmc_hw_reset(card); sdio_enable_func(func); sdio_release_host(func); diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 71101d1ec825..de5c64bbdb72 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -175,7 +175,7 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq); int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries); -int mmc_hw_reset(struct mmc_host *host); +int mmc_hw_reset(struct mmc_card *card); int mmc_sw_reset(struct mmc_host *host); void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card); -- cgit v1.2.3-71-gd317 From a431dbbc540532b7465eae4fc8b56a85a9fc7d17 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Fri, 8 Apr 2022 13:09:01 -0700 Subject: mm/sparsemem: fix 'mem_section' will never be NULL gcc 12 warning The gcc 12 compiler reports a "'mem_section' will never be NULL" warning on the following code: static inline struct mem_section *__nr_to_section(unsigned long nr) { #ifdef CONFIG_SPARSEMEM_EXTREME if (!mem_section) return NULL; #endif if (!mem_section[SECTION_NR_TO_ROOT(nr)]) return NULL; : It happens with CONFIG_SPARSEMEM_EXTREME off. The mem_section definition is #ifdef CONFIG_SPARSEMEM_EXTREME extern struct mem_section **mem_section; #else extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; #endif In the !CONFIG_SPARSEMEM_EXTREME case, mem_section is a static 2-dimensional array and so the check "!mem_section[SECTION_NR_TO_ROOT(nr)]" doesn't make sense. Fix this warning by moving the "!mem_section[SECTION_NR_TO_ROOT(nr)]" check up inside the CONFIG_SPARSEMEM_EXTREME block and adding an explicit NR_SECTION_ROOTS check to make sure that there is no out-of-bound array access. Link: https://lkml.kernel.org/r/20220331180246.2746210-1-longman@redhat.com Fixes: 3e347261a80b ("sparsemem extreme implementation") Signed-off-by: Waiman Long Reported-by: Justin Forbes Cc: "Kirill A . Shutemov" Cc: Ingo Molnar Cc: Rafael Aquini Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 962b14d403e8..46ffab808f03 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1397,13 +1397,16 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms) static inline struct mem_section *__nr_to_section(unsigned long nr) { + unsigned long root = SECTION_NR_TO_ROOT(nr); + + if (unlikely(root >= NR_SECTION_ROOTS)) + return NULL; + #ifdef CONFIG_SPARSEMEM_EXTREME - if (!mem_section) + if (!mem_section || !mem_section[root]) return NULL; #endif - if (!mem_section[SECTION_NR_TO_ROOT(nr)]) - return NULL; - return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; + return &mem_section[root][nr & SECTION_ROOT_MASK]; } extern size_t mem_section_usage_size(void); -- cgit v1.2.3-71-gd317
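
The fixed lookup logic is easy to check outside the kernel; here is a
self-contained sketch (constants and types invented, userspace C):

#include <stdio.h>

#define SECTIONS_PER_ROOT	256UL
#define NR_SECTION_ROOTS	64UL
#define SECTION_NR_TO_ROOT(nr)	((nr) / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

struct mem_section_sketch { unsigned long flags; };

/* Models the !CONFIG_SPARSEMEM_EXTREME static 2-D array case. */
static struct mem_section_sketch sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];

static struct mem_section_sketch *nr_to_section(unsigned long nr)
{
	unsigned long root = SECTION_NR_TO_ROOT(nr);

	/* Bounds-check the root index first, as the fix does; with a
	 * static array the old "!mem_section[root]" test was meaningless. */
	if (root >= NR_SECTION_ROOTS)
		return NULL;

	return &sections[root][nr & SECTION_ROOT_MASK];
}

int main(void)
{
	printf("%p %p\n", (void *)nr_to_section(0),
	       (void *)nr_to_section(~0UL));	/* second result is NULL */
	return 0;
}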